input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
expected = DateTime(2018, 10, 1, 12, 34, 56, 123456789,
tzinfo=FixedOffset(754))
actual = DateTime.from_iso_format("2018-10-01T12:34:56.123456789+12:34:56.123456")
assert expected == actual
def test_from_iso_format_with_negative_long_tz(self):
    """Parsing an ISO string with a negative sub-minute UTC offset."""
    parsed = DateTime.from_iso_format(
        "2018-10-01T12:34:56.123456789-12:34:56.123456"
    )
    # -12:34:56.123456 is truncated to whole minutes: -(12*60 + 34) = -754.
    reference = DateTime(2018, 10, 1, 12, 34, 56, 123456789,
                         tzinfo=FixedOffset(-754))
    assert reference == parsed
def test_datetime_copy(self):
    """copy.copy() must yield a distinct but equal DateTime."""
    original = DateTime(2010, 10, 1, 10, 0, 10)
    clone = copy.copy(original)
    assert clone is not original
    assert clone == original
def test_datetime_deep_copy(self):
    """copy.deepcopy() must yield a distinct but equal DateTime."""
    original = DateTime(2010, 10, 1, 10, 0, 12)
    clone = copy.deepcopy(original)
    assert clone is not original
    assert clone == original
def test_iso_format_with_time_zone_case_1():
    # python -m pytest tests/unit/time/test_datetime.py -s -v -k test_iso_format_with_time_zone_case_1
    reference = DateTime(2019, 10, 30, 7, 54, 2, 129790999,
                         tzinfo=timezone_utc)
    # Serialization, tzinfo normalization to FixedOffset(0), and
    # round-tripping through the parser.
    assert reference.iso_format() == "2019-10-30T07:54:02.129790999+00:00"
    assert reference.tzinfo == FixedOffset(0)
    round_tripped = DateTime.from_iso_format("2019-10-30T07:54:02.129790999+00:00")
    assert round_tripped == reference
def test_iso_format_with_time_zone_case_2():
    # python -m pytest tests/unit/time/test_datetime.py -s -v -k test_iso_format_with_time_zone_case_2
    parsed = DateTime.from_iso_format("2019-10-30T07:54:02.129790999+01:00")
    # A +01:00 offset parses to a 60-minute FixedOffset and formatting
    # round-trips to the exact input string.
    assert parsed.tzinfo == FixedOffset(60)
    assert parsed.iso_format() == "2019-10-30T07:54:02.129790999+01:00"
def test_to_native_case_1():
    # python -m pytest tests/unit/time/test_datetime.py -s -v -k test_to_native_case_1
    dt = DateTime.from_iso_format("2019-10-30T12:34:56.789123456")
    native = dt.to_native()
    assert (native.hour, native.minute) == (dt.hour, dt.minute)
    # Nanoseconds are truncated to microsecond precision in the native object.
    assert nano_add(native.second, nano_div(native.microsecond, 1000000)) == 56.789123
    assert native.tzinfo is None
    assert native.isoformat() == "2019-10-30T12:34:56.789123"
def test_to_native_case_2():
    # python -m pytest tests/unit/time/test_datetime.py -s -v -k test_to_native_case_2
    dt = DateTime.from_iso_format("2019-10-30T12:34:56.789123456+00:00")
    native = dt.to_native()
    assert (native.hour, native.minute) == (dt.hour, dt.minute)
    # Nanoseconds truncate to microseconds; the UTC offset is preserved.
    assert nano_add(native.second, nano_div(native.microsecond, 1000000)) == 56.789123
    assert native.tzinfo == FixedOffset(0)
    assert native.isoformat() == "2019-10-30T12:34:56.789123+00:00"
def test_to_native_case_3():
    # python -m pytest tests/unit/time/test_datetime.py -s -v -k test_to_native_case_3
    source = "2021-04-06T00:00:00.500006+00:00"
    parsed = DateTime.from_iso_format(source)
    converted = parsed.to_native()
    stdlib = datetime(2021, 4, 6, 0, 0, 0, 500006,
                      tzinfo=timezone_utc)
    # Both the wrapper and its native conversion equal the stdlib value.
    assert parsed == stdlib
    assert converted == stdlib
def test_from_native_case_1():
    # python -m pytest tests/unit/time/test_datetime.py -s -v -k test_from_native_case_1
    native = datetime(2018, 10, 1, 12, 34, 56, 789123)
    dt = DateTime.from_native(native)
    # Date and time fields carry over one-to-one ...
    for field in ("year", "month", "day", "hour", "minute", "second"):
        assert getattr(dt, field) == getattr(native, field)
    # ... while microseconds are scaled up to nanoseconds.
    assert dt.nanosecond == native.microsecond * 1000
    assert dt.tzinfo is None
def test_from_native_case_2():
    # python -m pytest tests/unit/time/test_datetime.py -s -v -k test_from_native_case_2
    native = datetime(2018, 10, 1, 12, 34, 56, 789123, FixedOffset(0))
    dt = DateTime.from_native(native)
    # Date and time fields carry over one-to-one ...
    for field in ("year", "month", "day", "hour", "minute", "second"):
        assert getattr(dt, field) == getattr(native, field)
    # ... microseconds scale to nanoseconds and the offset is preserved.
    assert dt.nanosecond == native.microsecond * 1000
    assert dt.tzinfo == FixedOffset(0)
@pytest.mark.parametrize("datetime_cls", (DateTime, datetime))
def test_transition_to_summertime(datetime_cls):
    """Adding a timedelta across a DST boundary keeps the wall-clock offset.

    2022-03-27 is the switch to summertime in Europe/Berlin.
    """
    dt = datetime_cls(2022, 3, 27, 1, 30)
    dt = timezone_berlin.localize(dt)
    assert dt.utcoffset() == timedelta(hours=1)
    assert isinstance(dt, datetime_cls)
    time = dt.time()
    assert (time.hour, time.minute) == (1, 30)
    dt += timedelta(hours=1)
    # The native datetime object just bluntly carries over the timezone. You'd
    # have to manually convert to UTC, do the calculation, and then convert
    # back. Not pretty, but we should make sure our implementation does
    # the same as the stdlib: offset unchanged, wall clock advanced by 1h.
    assert dt.utcoffset() == timedelta(hours=1)
    assert isinstance(dt, datetime_cls)
    time = dt.time()
    assert (time.hour, time.minute) == (2, 30)
@pytest.mark.parametrize("datetime_cls", (DateTime, datetime))
@pytest.mark.parametrize("utc_impl", (
    utc,
    datetime_timezone(timedelta(0)),
))
@pytest.mark.parametrize("tz", (
    timezone_berlin, datetime_timezone(timedelta(hours=-1))
))
def test_transition_to_summertime_in_utc_space(datetime_cls, utc_impl, tz):
    """DST-safe arithmetic: convert to UTC, add the delta, convert back.

    Also checks that sub-second precision (nanoseconds for DateTime,
    microseconds for datetime) survives the astimezone round-trip.

    NOTE(review): the ``tz`` parameter is never used in the body; the
    localize/astimezone calls use ``timezone_berlin`` directly — confirm
    whether ``tz`` was meant to replace those hard-coded references.
    """
    if datetime_cls == DateTime:
        dt = datetime_cls(2022, 3, 27, 1, 30, 1, 123456789)
    else:
        dt = datetime_cls(2022, 3, 27, 1, 30, 1, 123456)
    dt = timezone_berlin.localize(dt)
    assert isinstance(dt, datetime_cls)
    assert dt.utcoffset() == timedelta(hours=1)
    time = dt.time()
    assert (time.hour, time.minute, time.second) == (1, 30, 1)
    if datetime_cls == DateTime:
        assert time.nanosecond == 123456789
    else:
        assert time.microsecond == 123456
    # Step into UTC space before doing the arithmetic.
    dt = dt.astimezone(utc_impl)
    assert isinstance(dt, datetime_cls)
    assert dt.utcoffset() == timedelta(0)
    time = dt.time()
    assert (time.hour, time.minute) == (0, 30)
    dt += timedelta(hours=1)
    assert isinstance(dt, datetime_cls)
    assert dt.utcoffset() == timedelta(0)
    time = dt.time()
    assert (time.hour, time.minute) == (1, 30)
    # Back to Berlin: the summertime offset (+2h) now applies.
    dt = dt.astimezone(timezone_berlin)
    assert isinstance(dt, datetime_cls)
    assert dt.utcoffset() == timedelta(hours=2)
    time = dt.time()
    assert (time.hour, time.minute) == (3, 30)
    if datetime_cls == DateTime:
        assert time.nanosecond == 123456789
    else:
        assert time.microsecond == 123456
@pytest.mark.parametrize(("dt1", "dt2"), (
    # naive: stdlib microseconds vs. DateTime nanoseconds
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123),
        DateTime(2022, 11, 25, 12, 34, 56, 789123000)
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456)
    ),
    # aware: identical wall time and identical fixed offsets
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123000, FixedOffset(1))
    ),
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(-1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123000, FixedOffset(-1))
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(1))
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(-1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(-1))
    ),
    # aware: different wall times that denote the same instant
    (
        DateTime(2022, 11, 25, 12, 35, 56, 789123456, FixedOffset(1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(0))
    ),
    (
        # Not testing our library directly, but asserting that Python's
        # datetime implementation is aligned with ours.
        datetime(2022, 11, 25, 12, 35, 56, 789123, FixedOffset(1)),
        datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(0))
    ),
    (
        datetime(2022, 11, 25, 12, 35, 56, 789123, FixedOffset(1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123000, FixedOffset(0))
    ),
    (
        DateTime(2022, 11, 25, 12, 35, 56, 789123123, FixedOffset(1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123123, FixedOffset(0))
    ),
    # aware: pytz zones, same instant seen from London and Berlin
    (
        timezone_london.localize(datetime(2022, 11, 25, 12, 34, 56, 789123)),
        timezone_berlin.localize(datetime(2022, 11, 25, 13, 34, 56, 789123))
    ),
    (
        timezone_london.localize(datetime(2022, 11, 25, 12, 34, 56, 789123)),
        timezone_berlin.localize(DateTime(2022, 11, 25, 13, 34, 56, 789123000))
    ),
    (
        timezone_london.localize(DateTime(2022, 1, 25, 12, 34, 56, 789123123)),
        timezone_berlin.localize(DateTime(2022, 1, 25, 13, 34, 56, 789123123))
    ),
))
def test_equality(dt1, dt2):
    """Each pair must compare equal, symmetrically, and via <= / >=."""
    assert dt1 == dt2
    assert dt2 == dt1
    assert dt1 <= dt2
    assert dt2 <= dt1
    assert dt1 >= dt2
    assert dt2 >= dt1
@pytest.mark.parametrize(("dt1", "dt2"), (
    # naive: sub-second or larger differences must not compare equal
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123),
        DateTime(2022, 11, 25, 12, 34, 56, 789123001)
    ),
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123),
        DateTime(2022, 11, 25, 12, 34, 56, 789124000)
    ),
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123),
        DateTime(2022, 11, 25, 12, 34, 57, 789123000)
    ),
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123),
        DateTime(2022, 11, 25, 12, 35, 56, 789123000)
    ),
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123),
        DateTime(2022, 11, 25, 13, 34, 56, 789123000)
    ),
    # DateTime vs. DateTime with single-field differences
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456),
        DateTime(2022, 11, 25, 12, 34, 56, 789123450)
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456),
        DateTime(2022, 11, 25, 12, 34, 57, 789123456)
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456),
        DateTime(2022, 11, 25, 12, 35, 56, 789123456)
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456),
        DateTime(2022, 11, 25, 13, 34, 56, 789123456)
    ),
    # same wall time, different offsets => different instants
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(2)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123000, FixedOffset(1))
    ),
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(-2)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123000, FixedOffset(-1))
    ),
    # naive vs. aware values never compare equal
    (
        datetime(2022, 11, 25, 12, 34, 56, 789123),
        DateTime(2022, 11, 25, 12, 34, 56, 789123000, FixedOffset(0))
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(2)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(1))
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(-2)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(-1))
    ),
    (
        DateTime(2022, 11, 25, 12, 34, 56, 789123456),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(0))
    ),
    # offsets shift the instant by an hour in either direction
    (
        DateTime(2022, 11, 25, 13, 34, 56, 789123456, FixedOffset(1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(0))
    ),
    (
        DateTime(2022, 11, 25, 11, 34, 56, 789123456, FixedOffset(1)),
        DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(0))
    ),
))
def test_inequality(dt1, dt2):
    """Each pair must compare unequal, symmetrically."""
    assert dt1 != dt2
    assert dt2 != dt1
@pytest.mark.parametrize(
("dt1", "dt2"),
itertools.product(
(
datetime(2022, 11, 25, 12, 34, 56, 789123),
DateTime(2022, 11, 25, 12, 34, 56, 789123000),
datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(0)),
DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(0)),
datetime(2022, 11, 25, 12, 35, 56, 789123, FixedOffset(1)),
DateTime(2022, 11, 25, 12, 35, 56, 789123456, FixedOffset(1)),
datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(-1)),
DateTime(2022, 11, 25, 12, 34, 56, 789123456, FixedOffset(-1)),
datetime(2022, 11, 25, 12, 34, 56, 789123, FixedOffset(60 * -16)),
DateTime(2022, 11, 25, 12, 34, 56, 789123000,
FixedOffset(60 * -16)),
datetime(2022, 11, 25, 11, 34, 56, 789123, FixedOffset(60 * -17)),
DateTime(2022, 11, 25, 11, 34, 56, 789123000,
FixedOffset(60 * -17)),
DateTime(2022, 11, 25, 12, 34, 56, 789123456,
FixedOffset(60 * -16)),
DateTime(2022, 11, 25, 11, 34, 56, 789123456,
FixedOffset(60 * -17)),
),
repeat=2
| |
1 0 0 0 0]
[0 0 0 0 0 0 0 0 0 1]
"""
return self._differential.cohomology(n)
def cohomology_generators(self, max_degree):
    """
    Return lifts of algebra generators for cohomology in degrees at
    most ``max_degree``.

    INPUT:

    - ``max_degree`` -- integer

    OUTPUT:

    A dictionary keyed by degree, where the corresponding
    value is a list of cohomology generators in that degree.
    Actually, the elements are lifts of cohomology generators,
    which means that they lie in this differential graded
    algebra. It also means that they are only well-defined up to
    cohomology, not on the nose.

    ALGORITHM:

    Use induction on degree, so assume we know what happens in
    degrees less than `n`. Compute the cocycles `Z` in degree `n`.
    Form a subspace `W` of this, spanned by the cocycles generated
    by the lower degree generators, along with the coboundaries in
    degree `n`. Find a basis for the complement of `W` in `Z`:
    these represent cohomology generators.

    EXAMPLES::

        sage: A.<a,x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2,2))
        sage: B = A.cdg_algebra(differential={y: a*x})
        sage: B.cohomology_generators(3)
        {1: [a], 2: [x], 3: [a*y]}

    The previous example has infinitely generated cohomology:
    $a y^n$ is a cohomology generator for each $n$::

        sage: B.cohomology_generators(10)
        {1: [a], 2: [x], 3: [a*y], 5: [a*y^2], 7: [a*y^3], 9: [a*y^4]}

    In contrast, the corresponding algebra in characteristic $p$
    has finitely generated cohomology::

        sage: A3.<a,x,y> = GradedCommutativeAlgebra(GF(3), degrees=(1,2,2))
        sage: B3 = A3.cdg_algebra(differential={y: a*x})
        sage: B3.cohomology_generators(20)
        {1: [a], 2: [x], 3: [a*y], 5: [a*y^2], 6: [y^3]}

    This method works with both singly graded and multi-graded algebras::

        sage: Cs.<a,b,c,d> = GradedCommutativeAlgebra(GF(2), degrees=(1,2,2,3))
        sage: Ds = Cs.cdg_algebra({a:c, b:d})
        sage: Ds.cohomology_generators(10)
        {2: [a^2], 4: [b^2]}

        sage: Cm.<a,b,c,d> = GradedCommutativeAlgebra(GF(2), degrees=((1,0), (1,1), (0,2), (0,3)))
        sage: Dm = Cm.cdg_algebra({a:c, b:d})
        sage: Dm.cohomology_generators(10)
        {2: [a^2], 4: [b^2]}

    TESTS:

    Test that coboundaries do not appear as cohomology generators::

        sage: X.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
        sage: acyclic = X.cdg_algebra({x: y})
        sage: acyclic.cohomology_generators(3)
        {}
    """
    def vector_to_element(v, deg):
        """
        If an element of this algebra in degree ``deg`` is represented
        by a raw vector ``v``, convert it back to an element of the
        algebra again.
        """
        return sum(c * b for (c, b) in zip(v, self.basis(deg)))

    field = self.base_ring()
    # gens: dictionary indexed by degree. Value is a list of
    # cohomology generators in that degree.
    gens = {}
    # cocycles: dictionary indexed by degree. Value is a spanning
    # set for the cocycles in that degree.
    # Fix: store the degree-0 entry as a *list*, for consistency with
    # every later entry (``cocycles[n] = list(...) + cocycle_basis``
    # below) and with how entries are consumed (``for x in
    # cocycles[n - i]`` iterates a list of algebra elements).
    cocycles = {0: [self.one()]}
    for n in range(1, max_degree + 1):
        # Products of lower-degree generators with lower-degree
        # cocycles give the "decomposable" cocycles in degree n.
        old_cocycles = []
        for i in gens:
            for g in gens[i]:
                lowdim_cocycles = cocycles[n - i]
                for x in lowdim_cocycles:
                    a = g * x
                    if a:
                        old_cocycles.append(a)
        # Eliminate duplicates.
        old_cocycles = set(old_cocycles)
        # Convert elements of old_cocycles to raw vectors:
        old_cocycles_raw = [cocyc.basis_coefficients(total=True)
                            for cocyc in old_cocycles]
        old_cocycles_raw += self.coboundaries(n).basis()
        cochains = VectorSpace(field, len(self.basis(n)))
        # W = decomposable cocycles + coboundaries; anything outside
        # it in Z represents a new cohomology generator.
        W = cochains.submodule(old_cocycles_raw)
        basis_of_complement = []
        all_cocycles = self.cocycles(n).basis()
        for z in all_cocycles:
            if z not in W:
                basis_of_complement.append(z)
        cocycle_basis = [vector_to_element(coeffs, n)
                         for coeffs in basis_of_complement]
        # Only keep nonempty lists of generators.
        if cocycle_basis:
            gens[n] = cocycle_basis
        cocycles[n] = list(old_cocycles) + cocycle_basis
    return gens
class Element(GCAlgebra.Element):
    """Element class for a commutative differential graded algebra."""

    def differential(self):
        """
        The differential on this element.

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees = (2, 3, 2, 4))
            sage: B = A.cdg_algebra({t: x*y, x: y, z: y})
            sage: B.inject_variables()
            Defining x, y, z, t
            sage: x.differential()
            y
            sage: (-1/2 * x^2 + t).differential()
            0
        """
        # Delegate to the parent's differential map.
        return self.parent().differential()(self)

    def is_coboundary(self):
        """
        Return ``True`` if ``self`` is a coboundary and ``False``
        otherwise.

        This raises an error if the element is not homogeneous.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,2,2))
            sage: B = A.cdg_algebra(differential={b: a*c})
            sage: x,y,z = B.gens()
            sage: x.is_coboundary()
            False
            sage: (x*z).is_coboundary()
            True
            sage: (x*z+x*y).is_coboundary()
            False
            sage: (x*z+y**2).is_coboundary()
            Traceback (most recent call last):
            ...
            ValueError: This element is not homogeneous
        """
        if not self.is_homogeneous():
            raise ValueError('This element is not homogeneous')
        # To avoid taking the degree of 0, we special-case it.
        if self.is_zero():
            return True
        # Membership test in the coboundary subspace of this degree.
        v = vector(self.basis_coefficients())
        return v in self.parent().coboundaries(self.degree())

    def is_cohomologous_to(self, other):
        """
        Return ``True`` if ``self`` is cohomologous to ``other``
        and ``False`` otherwise.

        INPUT:

        - ``other`` -- another element of this algebra

        EXAMPLES::

            sage: A.<a,b,c,d> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1,1))
            sage: B = A.cdg_algebra(differential={a:b*c-c*d})
            sage: w, x, y, z = B.gens()
            sage: (x*y).is_cohomologous_to(y*z)
            True
            sage: (x*y).is_cohomologous_to(x*z)
            False
            sage: (x*y).is_cohomologous_to(x*y)
            True

        Two elements whose difference is not homogeneous are
        cohomologous if and only if they are both coboundaries::

            sage: w.is_cohomologous_to(y*z)
            False
            sage: (x*y-y*z).is_cohomologous_to(x*y*z)
            True
            sage: (x*y*z).is_cohomologous_to(0) # make sure 0 works
            True
        """
        # NOTE(review): ``other.is_zero()`` is called before the type
        # check, so ``other`` must already have an ``is_zero`` method.
        # The literal ``0`` in the doctest works because Sage preparses
        # it to an ``Integer`` — confirm this is the intended contract.
        if other.is_zero():
            return self.is_coboundary()
        if (not isinstance(other, DifferentialGCAlgebra.Element)
                or self.parent() is not other.parent()):
            raise ValueError('The element {} does not lie in this DGA'.format(other))
        if (self - other).is_homogeneous():
            # A homogeneous difference is checked directly.
            return (self - other).is_coboundary()
        else:
            return (self.is_coboundary() and other.is_coboundary())
class DifferentialGCAlgebra_multigraded(DifferentialGCAlgebra, GCAlgebra_multigraded):
"""
A commutative differential multi-graded algebras.
INPUT:
- ``A`` -- a commutative multi-graded algebra
- ``differential`` -- a differential
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
sage: B = A.cdg_algebra(differential={a: c})
sage: B.basis((1,0))
[a]
sage: B.basis(1, total=True)
[b, a]
sage: B.cohomology((1, 0))
Free module generated by {} over Rational Field
sage: B.cohomology(1, total=True)
Free module generated by {[b]} over Rational Field
"""
def __init__(self, A, differential):
    """
    Initialize ``self``.

    INPUT:

    - ``A`` -- a multi-graded commutative algebra

    - ``differential`` -- a differential

    EXAMPLES::

        sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
        sage: B = A.cdg_algebra(differential={a: c})

    Trying to define a differential which is not multi-graded::

        sage: A.<t,x,y,z> = GradedCommutativeAlgebra(QQ, degrees=((1,0),(1,0),(2,0),(0,2)))
        sage: B = A.cdg_algebra(differential={x:y}) # good
        sage: B = A.cdg_algebra(differential={t:z}) # good
        sage: B = A.cdg_algebra(differential={x:y, t:z}) # bad
        Traceback (most recent call last):
        ...
        ValueError: The differential does not have a well-defined degree
    """
    # Rebuild the underlying multi-graded algebra structure from A's
    # data (names, multi-degrees, cover ring, defining ideal).
    GCAlgebra_multigraded.__init__(self, A.base(), names=A._names,
                                   degrees=A._degrees_multi,
                                   R=A.cover_ring(),
                                   I=A.defining_ideal())
    # Re-wrap the differential's raw dictionary in the multi-graded
    # differential class (the ValueError in the doctest above arises
    # from this construction when degrees are inconsistent).
    self._differential = Differential_multigraded(self, differential._dic_)
def _base_repr(self):
    """
    Return the base string representation of ``self``, with the
    singly-graded degrees replaced by the multi-degrees.

    EXAMPLES::

        sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
        sage: A.cdg_algebra(differential={a: c})._base_repr()
        "Commutative Differential Graded Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field"
    """
    generic = DifferentialGCAlgebra._base_repr(self)
    return generic.replace(str(self._degrees), str(self._degrees_multi))
def coboundaries(self, n, total=False):
    """
    The ``n``-th coboundary group of the algebra.

    This is a vector space over the base field `F`, and it is
    returned as a subspace of the vector space `F^d`, where the
    ``n``-th homogeneous component has dimension `d`.

    INPUT:

    - ``n`` -- degree

    - ``total`` -- (default: ``False``) if ``True``, return the
      coboundaries in total degree ``n``

    If ``n`` is an integer rather than a multi-index, then the
    total degree is used in that case as well.

    EXAMPLES::

        sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
        sage: B = A.cdg_algebra(differential={a: c})
        sage: B.coboundaries((0,2))
        Vector space of degree 1 and dimension 1 over Rational Field
        Basis matrix:
        [1]
        sage: B.coboundaries(2)
        Vector space of degree 2 and dimension 1 over Rational Field
        Basis matrix:
        [0 1]
    """
    # The chain-level data lives on the differential; delegate to it.
    return self._differential.coboundaries(n, total)
def cocycles(self, n, total=False):
    r"""
    The ``n``-th cocycle group of the algebra.

    This is a vector space over the base field `F`, and it is
    returned as a subspace of the vector space `F^d`, where the
    ``n``-th homogeneous component has dimension `d`.

    INPUT:

    - ``n`` -- degree

    - ``total`` -- (default: ``False``) if ``True``, return the
      cocycles in total degree ``n``

    If ``n`` is an integer rather than a multi-index, then the
    total degree is used in that case as well.

    EXAMPLES::

        sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
        sage: B = A.cdg_algebra(differential={a: c})
        sage: B.cocycles((0,1))
        Vector space of degree 1 and dimension 1 over Rational Field
        Basis matrix:
        [1]
        sage: B.cocycles((0,1), total=True)
        Vector space of degree 2 and dimension 1 over Rational Field
        Basis matrix:
        [1 0]
    """
    # The chain-level data lives on the differential; delegate to it.
    return self._differential.cocycles(n, total)
def cohomology_raw(self, n, total=False):
"""
The ``n``-th cohomology group of the algebra.
This is a vector space over the base ring, and it is returned
as the quotient cocycles/coboundaries.
Compare to :meth:`cohomology`.
INPUT:
- ``n`` -- degree
- ``total`` -- (default: ``False``) if ``True``, return the
cohomology in total degree ``n``
| |
3 (which is not given in a d-gridded form)
R = zeros(m)
n = m*(mu_d-nu_d+1)
Matr = zeros(n,m+1)
Matr[:,1:] = E
T = zeros(n,m+1)
for i in range(E.cols):
T[:,0] = M*E[:,i]
N1 = (T+Matr).nullspace()
N2 = N1[0] #donne une matrice colonne
N2norm = 1/N2[0,0]*N2
coeff = - N2norm[1:,0]
R[:,i] = coeff
return R
# Let d be a positive integer and let B=A^(-1)(z^d).
# pair(n,vA,vAinv,p,d) returns [k,l] such that k+pl=n with k greater than or equal to the valuation of B at 0
# and l at least nu_d where vA is the valuation of A and vAinv is the valuation of A^(-1).
def pair(n,vA,vAinv,p,d):
L = []
for k in range(d*vAinv,n-p*math.ceil(d*vA/(p-1))+1):
if (n-k)%p == 0:
L.append([k,int((n-k)/p)])
return L
# If the Mahler system Y(z^p)=A(z)Y(z) is regular singular at 0 then we denote by Psi an associated gauge transformation and d its ramification.
# gaugetr(A,Ainv,p,d,E,matcst(A,p,d,E),n,nu_d,mu_d) returns the coefficients of Psi(z^d) from z^(nu_d) (nu_d being a lower bound of the valuation at 0 of Psi) to z^{nu_d+n-1}.
def gaugetr(A,Ainv,p,d,E,R,n,nu_d,mu_d):
    """Return a truncation of the gauge transformation Psi(z^d).

    The coefficients of z^(nu_d) .. z^(mu_d) are taken from ``E``; the
    coefficients up to z^(nu_d + n - 1) are then generated by the
    recurrence driven by B = A^(-1)(z^d), normalised by ``R``
    (the constant matrix from ``matcst``).
    """
    m = A.rows
    # Split E into its m x m coefficient blocks: L[j] is the matrix
    # coefficient of z^(nu_d + j).
    L = [E[i*m:(i+1)*m,:] for i in range(int(E.rows/m))]
    Lz = [z**i for i in range(nu_d,mu_d+1)]
    vA = val(A)
    vAinv = val(Ainv)
    if n <= mu_d-nu_d+1:
        # All requested coefficients are already present in E:
        # assemble the truncated series directly.
        multLzL = [x*y for x,y in zip(L[:n],Lz[:n])]
        thesum = zeros(m)
        for i in range(n):
            thesum = thesum + multLzL[i]
        return thesum
    for k in range(mu_d+1, nu_d+n):
        # Compute the coefficient E_k of z^k from lower-order ones.
        # pair() enumerates the admissible (k', l) index pairs.
        indices = pair(k,vA,vAinv,p,d)
        indices_k = [(indices[i])[0]-(indices[0])[0] for i in range(len(indices))]
        indices_l = [(indices[i])[1]-nu_d for i in range(len(indices))]
        # Series expansion of B = A^(-1)(z^d) over the needed range.
        devB = devmat(Ainv.subs(z,z**d), (indices[0])[0], (indices[-1])[0])
        listBk = [devB[i] for i in indices_k]
        listEl = [L[i] for i in indices_l]
        prodBkEl = ([x*y for x,y in zip(listBk,listEl)])
        thesum = zeros(m)
        for i in range(len(prodBkEl)):
            thesum = thesum + prodBkEl[i]
        # Normalise by R (invertible by construction in matcst).
        Ek = (thesum)*(R**(-1))
        L.append(Ek)
        Lz.append(z**k)
    # Assemble the full truncated series from all coefficients.
    lastmult = [x*y for x,y in zip(L,Lz)]
    thesum = zeros(m)
    for i in range(len(lastmult)):
        thesum = thesum + lastmult[i]
    return thesum
###########################################################################
###########################################################################
# EXAMPLES :
############################EXAMPLE 1######################################
# Companion matrix associated with the Mahler equation of section 5.2, which also is Equation (2.1) of the article [CDDM18]:
# "Computing solutions of linear Mahler equations" of Chyzak, Dreyfus, Dumas, and Mezzarobba in Math. Comp., 87 :2977–3021, 2018.
# A=Matrix([[0,1], [-z**6*(1+z)*(1-z**21-z**30)/(z**3*(1-z**3+z**6)*(1-z**7-z**10)), (1-z**28-z**31-z**37-z**40)/(z**3*(1-z**3+z**6)*(1-z**7-z**10))]])
# AlgoRS(A,3) returns ['the system is regular singular', 2] so the system is regular singular with an associated gauge transformation that has ramification d=2.
# nu_d = math.ceil(d*val(A)/(p-1))=-3 (d=2 and p=3)
# mu_d = math.ceil(-d*val(A.inv())/(p-1))=6
# The matrix M_d of Algorithm 3 is M_d=Matrix([
# [-1, -3, 0, 0, 1, 2, 0, 0, -1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 1, 2, 0, 0, -1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 4, 0, 0, -1, -2, 0, 0, 1, 1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, -1, -3, 0, 0, 1, 2, 0, 0, -1, -1, 0, 0, 1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -4, 0, 0, 1, 2, 0, 0, -1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 1, 4, 0, 0, -1, -2, 0, 0, 1, 1, 0, 0, -1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 4, 0, 0, -1, -3, 0, 0, 1, 2, 0, 0, -1, -1, 0, 0, 1, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, -1, -4, 0, 0, 1, 2, 0, 0, -1, -1, 0, 0, 1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -4, 0, 0, 1, 4, 0, 0, -1, -2, 0, 0, 1, 1, 0, 0, -1, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 1, 4, 0, 0, -1, -3, 0, 0, 1, 2, 0, 0, -1, -1, 0, 0, 1, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# and its d-gridded form is : blockmat_grdd(A**(-1),p,nu_d,mu_d,d)=
# [Matrix([
# [-1, -3, 1, 2, -1, -1, 1, 0, 0, 0],
# [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 4, -1, -2, 1, 1, -1, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -4, 1, 2, -1, -1, 1, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 4, -1, -3, 1, 2, -1, -1, 1, 0],
# [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [-1, -4, 1, 4, -1, -2, 1, 1, -1, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]),
# Matrix([
# [ 1, 2, -1, -1, 1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -3, 1, 2, -1, -1, 1, 0, 0, 0],
# [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 4, -1, -2, 1, 1, -1, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -4, 1, 2, -1, -1, 1, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 4, -1, -3, 1, 2, -1, -1, 1, 0],
# [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]),
# [0, 1]]
# The d-gridded form of the matrix N_d of Algorithm 3 is : matconditions_grdd(A**(-1),p,nu_d,mu_d,d)=
# [Matrix([
# [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -1, 1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 1, -1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -1, 1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 2, -1, -1, 1, 0, 0, 0, 0, 0],
# [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [-1, -2, 1, 1, -1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 2, -1, -1, 1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]),
# Matrix([
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# | |
import datetime
import functools
import json
import multiprocessing
import os
import sys
import tkinter as tk
from pathlib import Path
from tkinter import ttk, messagebox, filedialog
import keyboard
import win32api
import win32event
import winerror
from TextSpitter import TextSpitter
from tkdocviewer import DocViewer
from ttkbootstrap import Style
from AHFTSearch import FullTextSearch
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from dist import AHDiskIndexer, audio_to_text, AHFullTextIndexer, AHObjectDetector
from dist.shared import LOGGER, create_connection, get_sub_string, DATE_TIME_FORMAT, DIST_DIR, read_path_config, \
kb_to_mbs, convert_bytes
class ProcessAsync(multiprocessing.Process):
    """A Process subclass that runs a stored zero-argument callable."""

    def __init__(self, target):
        # Keep the callable on the instance; run() invokes it in the child.
        multiprocessing.Process.__init__(self)
        self.target = target

    def run(self) -> None:
        self.target()
class App(tk.Tk, FullTextSearch):
list_box_cols = ('Filename', 'Module', 'Size', 'Created', 'Modified')
indexers = (AHDiskIndexer.start, AHFullTextIndexer.start, AHObjectDetector.start, audio_to_text.start)
indexer_process: ProcessAsync
query_entry = ""
def __init__(self):
    """Build the main window: styling, menu bar, progress widgets, hotkey."""
    super(App, self).__init__()
    self.conn = create_connection()
    self.config_file = os.path.join(DIST_DIR, 'ahsearch.config')
    self.title('Full Disk Search')
    self.geometry('1065x560+30+30')
    self.iconbitmap(os.path.join(DIST_DIR, 'ahsearch.ico'))
    style = Style(theme="cosmo")
    style.configure('TEntry', font=('Helvetica', 12))
    style.configure("TProgressbar", thickness=5)
    self.style = style.master
    highlight_color = '#96a89b'
    # Consistent focus highlight across all interactive widget classes.
    style.map('TButton', bordercolor=[('focus !disabled', highlight_color)])
    style.map('TEntry', bordercolor=[('focus !disabled', highlight_color)])
    style.map('TRadiobutton', foreground=[('focus', highlight_color), ('selected', highlight_color)])
    style.map('Treeview', bordercolor=[('focus', highlight_color)])
    self.resizable(0, 0)
    self.query_var = tk.StringVar()
    self.dock_viewer: DocViewer
    menubar = tk.Menu(self)
    self.config(menu=menubar)
    menubar.add_command(label='Home', command=self.home_page)
    menubar.add_command(label='Indexer Config', command=self.config_page)
    menubar.add_command(label='Settings', command=self.settings_page)
    # Progress frame
    self.progress_frame = ttk.Frame(self)
    # configure the grid so the progress bar sits at the center
    self.progress_frame.columnconfigure(0, weight=1)
    self.progress_frame.rowconfigure(0, weight=1)
    # progressbar
    self.pb = ttk.Progressbar(self.progress_frame, orient=tk.HORIZONTAL, mode='indeterminate')
    self.pb.grid(row=0, column=0, sticky=tk.EW)
    # place the progress frame
    self.progress_frame.grid(row=2, column=0, sticky=tk.NSEW, padx=10, pady=(0, 2))
    # An empty frame stacked in the same grid cell; raising one or the
    # other shows/hides the progress bar (see start/stop_progress).
    self.empty_frame = tk.Frame(self, bg='#007bff')
    self.empty_frame.columnconfigure(0, weight=1)
    self.empty_frame.grid(row=2, column=0, sticky=tk.NSEW, padx=10, pady=(0, 2))
    self.dock_viewer = None
    self.active_frames = None
    # User settings, restored from the JSON config file below.
    self.show_preview = tk.StringVar()
    self.hot_key = tk.StringVar()
    self.search_type = tk.StringVar()
    self.indexer_type = tk.StringVar()
    self.file_size = tk.StringVar()
    data = self.read_data()
    self.hot_key.set(data.get('hot_key', 'ctrl+shift+f'))
    self.show_preview.set(data.get('preview', 'show'))
    # Global hotkey summons the window from anywhere in the OS.
    keyboard.add_hotkey(self.hot_key.get(), self.find_window_movetop, args=())
    self.file_size.set(data.get('file_size', '5'))
    self.home_page()
def find_window_movetop(self):
    """Bring the window to the front and place the cursor in the search box.

    Bound to the global hotkey: un-minimize first, then raise/focus/grab,
    and finally focus the query entry so the user can type immediately.
    """
    self.wm_deiconify()
    self.attributes("-topmost", True)
    self.focus_set()
    self.focus_force()
    self.grab_set()
    self.query_entry.focus_set()
    self.query_entry.focus()
def start_progress(self):
    """Reveal the progress frame and start the indeterminate animation."""
    self.progress_frame.tkraise()
    self.pb.start(5)
def stop_progress(self):
    """Hide the progress bar by raising the empty frame over it."""
    self.empty_frame.tkraise()
    self.pb.stop()
def start_indexing(self, current_indexer=0):
    """Launch the selected indexer in a child process and begin monitoring.

    ``indexer_type`` values 1-4 pick a single entry from ``self.indexers``;
    the value 5 means "run all", in which case ``current_indexer`` tracks
    the position in the sequence (advanced by ``monitor``).
    """
    indexer_value = int(self.indexer_type.get())
    indexer_index = indexer_value - 1
    if indexer_value < 5:
        # Single-indexer mode: kill any in-flight run and start fresh.
        self.stop_indexing()
        current_indexer = indexer_index
    self.start_progress()
    indexer = self.indexers[current_indexer]
    self.indexer_process = ProcessAsync(target=indexer)
    self.indexer_process.start()
    self.monitor(current_indexer)
def stop_indexing(self):
    """Terminate a running indexer process, if any, and reset the UI."""
    # hasattr guard: indexer_process is only assigned on first start.
    if hasattr(self, 'indexer_process') and self.indexer_process and self.indexer_process.is_alive():
        self.indexer_process.terminate()
    self.stop_progress()
def monitor(self, current_indexer=0):
    """Poll the indexer child process and drive the progress UI.

    Re-checks every 100 ms while the process is alive.  In "run all"
    mode (``indexer_type`` == 5) it chains the next indexer when the
    current one finishes; otherwise it stops the progress bar.
    """
    if self.indexer_process.is_alive():
        self.after(100, lambda: self.monitor(current_indexer))
    elif int(self.indexer_type.get()) == 5:
        self.stop_progress()
        current_indexer += 1
        if current_indexer < 4:
            # Chain the next indexer in the "run all" sequence.
            self.start_indexing(current_indexer)
        else:
            self.stop_progress()
    else:
        # Bug fix: a single-indexer run previously left the progress
        # bar animating forever after its process exited.
        self.stop_progress()
def write_widget(self, widget, data):
    """Append each line of ``data``, stripped of newlines, to ``widget``."""
    for entry in data:
        widget.insert("end", entry.strip("\n"))
def read_data(self):
    """Load the JSON config file, returning {} when missing or invalid."""
    try:
        with open(self.config_file) as handle:
            return json.load(handle)
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        return {}
def write_config(self, data):
    """Overwrite the config file with the given serialized JSON string."""
    with open(self.config_file, 'w') as handle:
        handle.write(data)
def read_config(self, included, excluded):
    """Populate the two listboxes from the persisted config."""
    stored = self.read_data()
    self.write_widget(included, stored.get('included', []))
    self.write_widget(excluded, stored.get('excluded', []))
def save_config(self, included, excluded):
    """Persist the current contents of both listboxes to the config file."""
    self.start_progress()
    payload = dict(
        included=[included.get(i) for i in range(included.size())],
        excluded=[excluded.get(i) for i in range(excluded.size())],
    )
    self.write_config(json.dumps(payload, indent=4))
    self.stop_progress()
def remove_and_get(self, i, widget):
item = widget.get(i)
widget.delete(i)
return item
def remove_item(self, included, excluded):
included_removed = set(map(functools.partial(self.remove_and_get, widget=included), included.curselection()))
excluded_removed = set(map(functools.partial(self.remove_and_get, widget=excluded), excluded.curselection()))
data = self.read_data()
included_items = list(set(set(data.get('included', [])) - included_removed))
excluded_items = list(set(set(data.get('excluded', [])) - excluded_removed))
data = json.dumps(dict(included=included_items, excluded=excluded_items), indent=4)
self.write_config(data)
    def file_preview(self, widget=None):
        """Render a preview of the file selected in *widget* in the dock viewer.

        Text-like files show the first 500 extracted characters; files the
        viewer reports it can display are rendered directly; anything else
        falls back to a stat summary (size and timestamps).
        """
        cur_item = widget.focus()
        file = widget.item(cur_item)['values'][0]
        # NOTE(review): `base` is unused; only the extension is inspected.
        base, ext = map(str.lower, os.path.splitext(file))
        if ext in ('.pdf', '.docx', '.txt'):
            text = TextSpitter(filename=file)
            # TextSpitter may return bytes; normalise to str before slicing
            text = text.decode('utf-8') if isinstance(text, bytes) else text
            self.dock_viewer.display_text(text[0:500] + '...')
        elif self.dock_viewer.can_display(file):
            self.dock_viewer.display_file(file, pages=1)
        else:
            # fall back to basic filesystem metadata
            file_stats = os.stat(file)
            message = f"File: {file}\n"
            message += "##############################\n"
            message += f"Size: {convert_bytes(file_stats.st_size)}\n"
            message += f"Creation: {self.epoch_to_date(file_stats.st_ctime)}\n"
            message += f"Modification: {self.epoch_to_date(file_stats.st_mtime)}"
            self.dock_viewer.display_text(message)
def show_hide_preview(self, widget=None):
if self.show_preview.get() == 'show':
self.dock_viewer.grid(row=0, column=2, sticky=tk.NSEW)
widget.column('Filename', width=368)
widget.column('Module', width=50)
widget.column('Size', width=90)
widget.column('Created', width=115)
widget.column('Modified', width=115)
else:
self.dock_viewer.grid_forget()
widget.column('Filename', width=650)
widget.column('Module', width=50)
widget.column('Size', width=90)
widget.column('Created', width=115)
widget.column('Modified', width=115)
widget.update()
def epoch_to_date(self, epoch_time):
return datetime.datetime.fromtimestamp(epoch_time).strftime(DATE_TIME_FORMAT)
def message(self, message, name="Error"):
methods = dict(Error=messagebox.showerror, Info=messagebox.showinfo, Warning=messagebox.showwarning)
methods[name](title=name, message=message)
    def get_query(self, search):
        """Build the SQL statement for *search* based on the selected search type.

        Types: 1 = filename, 2 = full text, 3 = image labels, 4 = audio
        transcripts; any other value runs the combined search across all
        tables.

        NOTE(review): the SQL is assembled with f-strings from user input,
        which is vulnerable to SQL injection; consider parameterized queries.
        """
        search_type = int(self.search_type.get())
        if search_type == 1:
            # LIKE-match the filename against every whitespace-separated term
            substr = get_sub_string(search.split(" "), " AND filename LIKE ", "filename LIKE ", True)
            query = f"SELECT *, 'files' as 'type' FROM files WHERE {substr}"
        elif search_type == 2:
            # full-text hits (capped at 100) mapped back to file rows
            text_files = list(self.run_query(search))[:100]
            substr = get_sub_string(text_files, " OR filename = ")
            query = f"SELECT *, 'text' as 'type' FROM files WHERE {substr}"
        elif search_type == 3:
            # match detected image-object labels
            query = "SELECT files.filename, size, creation, modification, 'files' as 'type' FROM files "
            query += "INNER JOIN image_objects on files.filename=image_objects.filename "
            query += "WHERE files.filename = image_objects.filename AND "
            arg_1 = " AND image_objects.objects LIKE "
            arg_2 = "image_objects.objects LIKE "
            query += get_sub_string(search.split(" "), arg_1, arg_2, True)
        elif search_type == 4:
            # match transcribed audio words
            query = "SELECT files.filename, size, creation, modification, 'files' as 'type' FROM files "
            query += "INNER JOIN voices on files.filename=voices.filename "
            query += "WHERE files.filename = voices.filename AND "
            query += get_sub_string(search.split(" "), " AND voices.words LIKE ", "voices.words LIKE ", True)
        else:
            # combined search: union of text, filename, image and voice hits
            text_files = list(self.run_query(search))[:100]
            sub_query = get_sub_string(text_files, ' OR filename = ')
            query = "SELECT files.filename, files.size, files.creation, files.modification, ur.type as 'type' "
            query += "FROM files INNER JOIN ( "
            if sub_query:
                query += f"SELECT 'text' as type, filename, size FROM files WHERE {sub_query} "
                query += "UNION ALL "
            query += f"SELECT 'files' as type, filename, size FROM files WHERE filename LIKE '%{search}%' "
            query += "UNION ALL "
            query += f"SELECT 'images' as type, filename, objects FROM image_objects WHERE objects LIKE '%{search}%' "
            query += "UNION ALL "
            query += f"SELECT 'voices' as type, filename, words FROM voices WHERE words LIKE '%{search}%' "
            query += ") ur ON ur.filename=files.filename WHERE files.filename = ur.filename"
        return query
    def fill_treeview(self, widget):
        """Run the current query and populate *widget* with the matching files.

        User-facing problems (empty query, no results) are deliberately
        raised as AssertionError and surfaced in an info messagebox;
        anything else is logged.
        """
        query = self.query_var.get()
        try:
            assert query, "Please enter a query string."
            query = self.get_query(query)
            widget.delete(*widget.get_children())
            # a query ending in "WHERE " means no usable search terms remained
            assert not query.lower().endswith('where '), "No files were found."
            files = self.conn.cursor().execute(query).fetchall()
            assert len(files) > 0, f"'{self.query_var.get()}' related data not found."
            for index, row in enumerate(files):
                filename, size, creation, modification, table = row
                utc_create = self.epoch_to_date(creation)
                utc_mod = self.epoch_to_date(modification)
                widget.insert("", "end", values=(filename, table, int(size), utc_create, utc_mod))
        except AssertionError as error:
            self.message(error.args[0], "Info")
        except Exception as err:
            LOGGER.error(err)
    def open_target(self, event='file', widget=None, copy=False):
        """Open, or copy the path of, the row currently selected in *widget*.

        event -- 'file' targets the file itself; any other value targets
                 its parent directory.
        copy  -- when True, put the path on the clipboard instead of opening.

        NOTE(review): os.startfile is Windows-only.
        """
        try:
            cur_item = widget.focus()
            cur_text = widget.item(cur_item)['values'][0]
            target = cur_text if event == 'file' else str(Path(cur_text).parent)
            if copy:
                self.clipboard_clear()
                self.clipboard_append(target)
                self.update()  # flush so the clipboard content persists
            else:
                os.startfile(target)
        except Exception as error:
            self.message(message=error.args[0])
def folder_select(self, folder_list):
answer = filedialog.askdirectory(parent=self, initialdir=os.environ['HOMEPATH'],
title="Please select a folder:")
folder_list.insert("end", str(Path(answer).absolute()))
def destroy_active_frames(self):
self.query_var.set('')
self.search_type.set('1')
self.indexer_type.set('1')
if self.active_frames:
for frame in self.active_frames: frame.destroy()
    def config_page(self):
        """Build the 'Configure Indexing' page: include/exclude pickers,
        indexer radio buttons, folder listboxes and the action buttons."""
        self.title('Configure Indexing')
        self.destroy_active_frames()
        # row 0: Include / Exclude folder-picker buttons
        config_file_frame = ttk.Frame(self)
        config_file_frame.columnconfigure(0, weight=16)
        config_file_frame.columnconfigure(1, weight=2)
        config_file_frame.columnconfigure(2, weight=16)
        frame_params = dict(column=0, sticky=tk.NSEW, padx=10)
        config_file_frame.grid(row=0, pady=(10, 10), **frame_params)
        grid_params = dict(row=0, sticky=tk.W)
        select_button = ttk.Button(config_file_frame, text='Include', width=71)
        select_button.grid(column=0, padx=(0, 5), **grid_params)
        exclude_button = ttk.Button(config_file_frame, text="Exclude", width=71, style='secondary.TButton')
        exclude_button.grid(column=2, **grid_params)
        # row 1: radio buttons choosing which indexer start_indexing() runs
        config_radio_frame = ttk.LabelFrame(self, text='Configuration Parameters')
        config_radio_frame.grid(row=1, pady=(0, 0), ipady=5, **frame_params)
        grid_params = dict(row=2, sticky=tk.E)
        ttk.Label(config_radio_frame, text='Select Indexer: ').grid(column=0, **grid_params)
        radio_params = dict(variable=self.indexer_type, width=12)
        filename_indexer = tk.Radiobutton(config_radio_frame, text='File Info', value=1, **radio_params)
        filename_indexer.grid(column=1, **grid_params)
        fulltext_indexer = tk.Radiobutton(config_radio_frame, text='Full Text', value=2, **radio_params)
        fulltext_indexer.grid(column=2, **grid_params)
        radio_params['width'] = 17  # wider button for the longer label
        image_objects_indexer = tk.Radiobutton(config_radio_frame, text='Image Labels', value=3, **radio_params)
        image_objects_indexer.grid(column=3, **grid_params)
        radio_params['width'] = 12
        audio_search_indexer = tk.Radiobutton(config_radio_frame, text='Audio as Text', value=4, **radio_params)
        audio_search_indexer.grid(column=4, **grid_params)
        all_indexer = tk.Radiobutton(config_radio_frame, text='All Indexers', value=5, **radio_params)
        all_indexer.grid(column=5, **grid_params)
        # row 3: the included/excluded folder listboxes
        list_frame = ttk.Frame(self)
        list_frame.columnconfigure(0, weight=16)
        list_frame.columnconfigure(1, weight=2)
        list_frame.columnconfigure(2, weight=16)
        list_frame.grid(row=3, pady=(0, 10), **frame_params)
        ttk.Label(list_frame, text="Included Folders").grid(row=0, column=0, sticky=tk.W)
        list_box = tk.Listbox(list_frame, width=70, height=21, borderwidth=0, selectmode='multiple')
        list_box.grid(row=1, column=0, sticky=tk.EW)
        select_button.config(command=lambda widget=list_box: self.folder_select(folder_list=widget))
        ttk.Label(list_frame, text="Excluded Folders").grid(row=0, column=2, sticky=tk.W)
        list_box_excluded = tk.Listbox(list_frame, width=70, height=21, borderwidth=0, selectmode='multiple')
        exclude_button.config(command=lambda widget=list_box_excluded: self.folder_select(folder_list=widget))
        list_box_excluded.grid(row=1, column=2, sticky=tk.EW)
        # row 4: Delete / Save / Start / Stop action buttons
        action_frame = ttk.Frame(self)
        action_frame.columnconfigure(0, weight=13)
        action_frame.columnconfigure(1, weight=1)
        action_frame.columnconfigure(2, weight=1)
        action_frame.columnconfigure(3, weight=1)
        action_frame.columnconfigure(4, weight=1)
        action_frame.grid(row=4, pady=(0, 10), ipady=5, **frame_params)
        grid_params = dict(row=0, sticky=tk.E)
        delete_button = ttk.Button(action_frame, text='Delete', width=15, style='danger.TButton')
        command = dict(command=lambda _incl=list_box, _excl=list_box_excluded: self.remove_item(_incl, _excl))
        delete_button.config(**command)
        delete_button.grid(column=1, **grid_params)
        save_button = ttk.Button(action_frame, text='Save', width=15)
        command = dict(command=lambda _incl=list_box, _excl=list_box_excluded: self.save_config(_incl, _excl))
        save_button.config(**command)
        save_button.grid(column=2, **grid_params)
        indexer_button = ttk.Button(action_frame, text='Start Indexing', width=15, style='success.TButton')
        indexer_button.config(command=self.start_indexing)
        indexer_button.grid(column=3, **grid_params)
        stop_indexer_button = ttk.Button(action_frame, text='Stop Indexing', style='danger.TButton')
        stop_indexer_button.config(command=self.stop_indexing)
        stop_indexer_button.grid(column=4, **grid_params)
        # pre-fill the listboxes from the saved configuration
        self.read_config(list_box, list_box_excluded)
        # remember frames so destroy_active_frames() can clear this page
        self.active_frames = (config_file_frame, config_radio_frame, list_frame, action_frame)
    def do_popup(self, event, popup):
        """Show the context menu at the pointer, always releasing its grab."""
        try:
            popup.tk_popup(event.x_root, event.y_root)
        finally:
            popup.grab_release()
def home_page(self):
self.destroy_active_frames()
self.title('AH Disk Search')
query_frame = ttk.Frame(self)
query_frame.columnconfigure(0, weight=1)
query_frame.columnconfigure(1, weight=17)
query_frame.columnconfigure(2, weight=1)
query_frame.columnconfigure(3, weight=2)
frame_params = dict(column=0, sticky=tk.NSEW, padx=10)
query_frame.grid(row=0, pady=(15, 10), **frame_params)
label = ttk.Label(query_frame, text='Search: ')
label.grid(column=0, row=0, sticky=tk.W)
query_entry = ttk.Entry(query_frame, textvariable=self.query_var, width=114, style='TEntry')
self.query_entry = query_entry
query_entry.focus()
query_entry.grid(column=1, row=0, sticky=tk.EW)
search_button = ttk.Button(query_frame, text='Search', width=20)
| |
import os
import shutil
import pytest
from freezegun import freeze_time
from great_expectations import DataContext
from great_expectations.core import RunIdentifier
from great_expectations.data_context.store import ExpectationsStore, ValidationsStore
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
ValidationResultIdentifier,
)
from great_expectations.data_context.util import (
file_relative_path,
instantiate_class_from_config,
)
from great_expectations.render.renderer.site_builder import SiteBuilder
def assert_how_to_buttons(
    context,
    index_page_locator_info: str,
    index_links_dict: dict,
    show_how_to_buttons=True,
):
    """Helper function to assert presence or non-presence of how-to buttons and related content in various
    Data Docs pages.

    context -- the DataContext whose local_site Data Docs are inspected
    index_page_locator_info -- "file://..." URL of the site index page
    index_links_dict -- links payload returned by SiteBuilder.build()
    show_how_to_buttons -- True asserts the elements appear on every page,
        False asserts they are absent everywhere
    """
    # these are simple checks for presence of certain page elements
    show_walkthrough_button = "Show Walkthrough"
    walkthrough_modal = "Great Expectations Walkthrough"
    cta_footer = (
        "To continue exploring Great Expectations check out one of these tutorials..."
    )
    how_to_edit_suite_button = "How to Edit This Suite"
    how_to_edit_suite_modal = "How to Edit This Expectation Suite"
    action_card = "Actions"
    # which element strings must (not) appear on each page type
    how_to_page_elements_dict = {
        "index_pages": [show_walkthrough_button, walkthrough_modal, cta_footer],
        "expectation_suites": [
            how_to_edit_suite_button,
            how_to_edit_suite_modal,
            show_walkthrough_button,
            walkthrough_modal,
        ],
        "validation_results": [
            how_to_edit_suite_button,
            how_to_edit_suite_modal,
            show_walkthrough_button,
            walkthrough_modal,
        ],
        "profiling_results": [action_card, show_walkthrough_button, walkthrough_modal],
    }
    data_docs_site_dir = os.path.join(
        context._context_root_directory,
        context._project_config.data_docs_sites["local_site"]["store_backend"][
            "base_directory"
        ],
    )
    # [7:] strips the leading "file://" scheme to get a filesystem path
    page_paths_dict = {
        "index_pages": [index_page_locator_info[7:]],
        "expectation_suites": [
            os.path.join(data_docs_site_dir, link_dict["filepath"])
            for link_dict in index_links_dict.get("expectations_links", [])
        ],
        "validation_results": [
            os.path.join(data_docs_site_dir, link_dict["filepath"])
            for link_dict in index_links_dict.get("validations_links", [])
        ],
        "profiling_results": [
            os.path.join(data_docs_site_dir, link_dict["filepath"])
            for link_dict in index_links_dict.get("profiling_links", [])
        ],
    }
    for page_type, page_paths in page_paths_dict.items():
        for page_path in page_paths:
            with open(page_path, "r") as f:
                page = f.read()
            for how_to_element in how_to_page_elements_dict[page_type]:
                if show_how_to_buttons:
                    assert how_to_element in page
                else:
                    assert how_to_element not in page
@freeze_time("09/26/2019 13:42:41")
@pytest.mark.rendered_output
def test_configuration_driven_site_builder(
    site_builder_data_context_with_html_store_titanic_random,
):
    """End-to-end SiteBuilder test: profile, validate, build, incrementally
    rebuild, resolve resource URLs, and clean the generated site."""
    context = site_builder_data_context_with_html_store_titanic_random
    context.add_validation_operator(
        "validate_and_store",
        {
            "class_name": "ActionListValidationOperator",
            "action_list": [
                {
                    "name": "store_validation_result",
                    "action": {
                        "class_name": "StoreValidationResultAction",
                        "target_store_name": "validations_store",
                    },
                },
                {
                    "name": "extract_and_store_eval_parameters",
                    "action": {
                        "class_name": "StoreEvaluationParametersAction",
                        "target_store_name": "evaluation_parameter_store",
                    },
                },
            ],
        },
    )
    # profiling the Titanic datasource will generate one expectation suite and one validation
    # that is a profiling result
    datasource_name = "titanic"
    data_asset_name = "Titanic"
    profiler_name = "BasicDatasetProfiler"
    generator_name = "subdir_reader"
    context.profile_datasource(datasource_name)
    # creating another validation result using the profiler's suite (no need to use a new expectation suite
    # for this test). having two validation results - one with run id "profiling" - allows us to test
    # the logic of run_name_filter that helps filtering validation results to be included in
    # the profiling and the validation sections.
    batch_kwargs = context.build_batch_kwargs(
        datasource=datasource_name,
        batch_kwargs_generator=generator_name,
        data_asset_name=data_asset_name,
    )
    expectation_suite_name = "{}.{}.{}.{}".format(
        datasource_name, generator_name, data_asset_name, profiler_name
    )
    batch = context.get_batch(
        batch_kwargs=batch_kwargs, expectation_suite_name=expectation_suite_name,
    )
    run_id = RunIdentifier(run_name="test_run_id_12345")
    context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    data_docs_config = context._project_config.data_docs_sites
    local_site_config = data_docs_config["local_site"]
    validations_set = set(context.stores["validations_store"].list_keys())
    assert len(validations_set) == 6
    assert (
        ValidationResultIdentifier(
            expectation_suite_identifier=ExpectationSuiteIdentifier(
                expectation_suite_name=expectation_suite_name
            ),
            run_id="test_run_id_12345",
            batch_identifier=batch.batch_id,
        )
        in validations_set
    )
    # the original repeated this "profiling" membership check three times;
    # asserting it once is sufficient
    assert (
        ValidationResultIdentifier(
            expectation_suite_identifier=ExpectationSuiteIdentifier(
                expectation_suite_name=expectation_suite_name
            ),
            run_id="profiling",
            batch_identifier=batch.batch_id,
        )
        in validations_set
    )
    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config
    )
    res = site_builder.build()
    index_page_locator_info = res[0]
    index_links_dict = res[1]
    # assert that how-to buttons and related elements are rendered (default behavior)
    assert_how_to_buttons(context, index_page_locator_info, index_links_dict)
    assert (
        index_page_locator_info
        == "file://"
        + context.root_directory
        + "/uncommitted/data_docs/local_site/index.html"
    )
    assert "site_name" in index_links_dict
    assert "expectations_links" in index_links_dict
    assert len(index_links_dict["expectations_links"]) == 5
    assert "validations_links" in index_links_dict
    assert (
        len(index_links_dict["validations_links"]) == 1
    ), """
    The only rendered validation should be the one not generated by the profiler
    """
    assert "profiling_links" in index_links_dict
    assert len(index_links_dict["profiling_links"]) == 5
    # save documentation locally, starting from a clean target directory
    # (no need to pre-create the directory that is removed right away)
    os.makedirs("./tests/render/output", exist_ok=True)
    if os.path.isdir("./tests/render/output/documentation"):
        shutil.rmtree("./tests/render/output/documentation")
    shutil.copytree(
        os.path.join(
            site_builder_data_context_with_html_store_titanic_random.root_directory,
            "uncommitted/data_docs/",
        ),
        "./tests/render/output/documentation",
    )
    # let's create another validation result and run the site builder to add it
    # to the data docs
    # the operator does not have an StoreValidationResultAction action configured, so the site
    # will not be updated without our call to site builder
    expectation_suite_path_component = expectation_suite_name.replace(".", "/")
    validation_result_page_path = os.path.join(
        site_builder.site_index_builder.target_store.store_backends[
            ValidationResultIdentifier
        ].full_base_directory,
        "validations",
        expectation_suite_path_component,
        run_id.run_name,
        run_id.run_time.strftime("%Y%m%dT%H%M%S.%fZ"),
        batch.batch_id + ".html",
    )
    ts_last_mod_0 = os.path.getmtime(validation_result_page_path)
    run_id = RunIdentifier(run_name="test_run_id_12346")
    operator_result = context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    validation_result_id = operator_result.list_validation_result_identifiers()[0]
    res = site_builder.build(resource_identifiers=[validation_result_id])
    index_links_dict = res[1]
    # verify that an additional validation result HTML file was generated
    assert len(index_links_dict["validations_links"]) == 2
    # verify that the validation result HTML file rendered in the previous run was NOT updated
    ts_last_mod_1 = os.path.getmtime(validation_result_page_path)
    assert ts_last_mod_0 == ts_last_mod_1
    # verify that the new method of the site builder that returns the URL of the HTML file that renders
    # a resource
    new_validation_result_page_path = os.path.join(
        site_builder.site_index_builder.target_store.store_backends[
            ValidationResultIdentifier
        ].full_base_directory,
        "validations",
        expectation_suite_path_component,
        run_id.run_name,
        run_id.run_time.strftime("%Y%m%dT%H%M%S.%fZ"),
        batch.batch_id + ".html",
    )
    html_url = site_builder.get_resource_url(resource_identifier=validation_result_id)
    assert "file://" + new_validation_result_page_path == html_url
    html_url = site_builder.get_resource_url()
    assert (
        "file://"
        + os.path.join(
            site_builder.site_index_builder.target_store.store_backends[
                ValidationResultIdentifier
            ].full_base_directory,
            "index.html",
        )
        == html_url
    )
    team_site_config = data_docs_config["team_site"]
    team_site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **team_site_config
    )
    team_site_builder.clean_site()
    obs = [
        url_dict
        for url_dict in context.get_docs_sites_urls(site_name="team_site")
        if url_dict.get("site_url")
    ]
    assert len(obs) == 0
    # exercise clean_site
    site_builder.clean_site()
    obs = [
        url_dict
        for url_dict in context.get_docs_sites_urls()
        if url_dict.get("site_url")
    ]
    assert len(obs) == 0
    # restore site
    context = site_builder_data_context_with_html_store_titanic_random
    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config
    )
    site_builder.build()
@pytest.mark.rendered_output
def test_configuration_driven_site_builder_without_how_to_buttons(
    site_builder_data_context_with_html_store_titanic_random,
):
    """With show_how_to_buttons=False, no how-to elements appear in the docs."""
    context = site_builder_data_context_with_html_store_titanic_random
    context.add_validation_operator(
        "validate_and_store",
        {
            "class_name": "ActionListValidationOperator",
            "action_list": [
                {
                    "name": "store_validation_result",
                    "action": {
                        "class_name": "StoreValidationResultAction",
                        "target_store_name": "validations_store",
                    },
                },
                {
                    "name": "extract_and_store_eval_parameters",
                    "action": {
                        "class_name": "StoreEvaluationParametersAction",
                        "target_store_name": "evaluation_parameter_store",
                    },
                },
            ],
        },
    )
    # profiling the Titanic datasource will generate one expectation suite and one validation
    # that is a profiling result
    datasource_name = "titanic"
    data_asset_name = "Titanic"
    profiler_name = "BasicDatasetProfiler"
    generator_name = "subdir_reader"
    context.profile_datasource(datasource_name)
    # creating another validation result using the profiler's suite (no need to use a new expectation suite
    # for this test). having two validation results - one with run id "profiling" - allows us to test
    # the logic of run_name_filter that helps filtering validation results to be included in
    # the profiling and the validation sections.
    batch_kwargs = context.build_batch_kwargs(
        datasource=datasource_name,
        batch_kwargs_generator=generator_name,
        # was `name=...` (a kwarg build_batch_kwargs does not treat as the
        # asset name); use the same keyword as the sibling test above
        data_asset_name=data_asset_name,
    )
    expectation_suite_name = "{}.{}.{}.{}".format(
        datasource_name, generator_name, data_asset_name, profiler_name
    )
    batch = context.get_batch(
        batch_kwargs=batch_kwargs, expectation_suite_name=expectation_suite_name,
    )
    run_id = "test_run_id_12345"
    context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    data_docs_config = context._project_config.data_docs_sites
    local_site_config = data_docs_config["local_site"]
    # set this flag to false in config to hide how-to buttons and related elements
    local_site_config["show_how_to_buttons"] = False
    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config
    )
    res = site_builder.build()
    index_page_locator_info = res[0]
    index_links_dict = res[1]
    assert_how_to_buttons(
        context, index_page_locator_info, index_links_dict, show_how_to_buttons=False
    )
def test_site_builder_with_custom_site_section_builders_config(tmp_path_factory):
    """Test that site builder can handle partially specified custom site_section_builders config"""
    base_dir = str(tmp_path_factory.mktemp("project_dir"))
    project_dir = os.path.join(base_dir, "project_path")
    os.mkdir(project_dir)
    # fixture config swaps site section builder source stores and specifies custom run_name_filters
    shutil.copy(
        file_relative_path(
            __file__, "../test_fixtures/great_expectations_custom_local_site_config.yml"
        ),
        str(os.path.join(project_dir, "great_expectations.yml")),
    )
    context = DataContext(context_root_dir=project_dir)
    local_site_config = context._project_config.data_docs_sites.get("local_site")
    module_name = "great_expectations.render.renderer.site_builder"
    site_builder = instantiate_class_from_config(
        config=local_site_config,
        runtime_environment={
            "data_context": context,
            "root_directory": context.root_directory,
            "site_name": "local_site",
        },
        config_defaults={"module_name": module_name},
    )
    site_section_builders = site_builder.site_section_builders
    expectations_site_section_builder = site_section_builders["expectations"]
    assert isinstance(expectations_site_section_builder.source_store, ValidationsStore)
    validations_site_section_builder = site_section_builders["validations"]
    assert isinstance(validations_site_section_builder.source_store, ExpectationsStore)
    assert validations_site_section_builder.run_name_filter == {
        "ne": "custom_validations_filter"
    }
    profiling_site_section_builder = site_section_builders["profiling"]
    # NOTE(review): the original re-asserted the *validations* builder's
    # source store here (copy-paste); that redundant duplicate is dropped.
    # If the fixture also swaps the profiling source store, add the matching
    # isinstance check on profiling_site_section_builder.source_store.
    assert profiling_site_section_builder.run_name_filter == {
        "eq": "custom_profiling_filter"
    }
@freeze_time("09/24/2019 23:18:36")
def test_site_builder_usage_statistics_enabled(
    site_builder_data_context_with_html_store_titanic_random,
):
    """Every built page embeds the usage-statistics logo URL, including the
    dataContextId query parameter, when usage statistics are enabled."""
    context = site_builder_data_context_with_html_store_titanic_random
    sites = (
        site_builder_data_context_with_html_store_titanic_random._project_config_with_variables_substituted.data_docs_sites
    )
    site_builder = instantiate_class_from_config(
        config=sites["local_site"],
        runtime_environment={
            "data_context": context,
            "root_directory": context.root_directory,
            "site_name": "local_site",
        },
        config_defaults={
            "module_name": "great_expectations.render.renderer.site_builder"
        },
    )
    build_result = site_builder.build()
    index_page_path = build_result[0]
    links_dict = build_result[1]
    # check the index page plus every expectation-suite and profiling page
    page_paths_to_check = [index_page_path]
    for links_key in ("expectations_links", "profiling_links"):
        page_paths_to_check.extend(
            file_relative_path(index_page_path, link_dict["filepath"])
            for link_dict in links_dict[links_key]
        )
    expected_logo_url = "https://great-expectations-web-assets.s3.us-east-2.amazonaws.com/logo-long.png?d=20190924T231836.000000Z&dataContextId=f43d4897-385f-4366-82b0-1a8eda2bf79c"
    for page_path in page_paths_to_check:
        # [7:] strips the leading "file://" scheme
        with open(page_path[7:]) as f:
            page_contents = f.read()
        assert expected_logo_url in page_contents
@freeze_time("09/24/2019 23:18:36")
def test_site_builder_usage_statistics_disabled(
site_builder_data_context_with_html_store_titanic_random,
):
context = site_builder_data_context_with_html_store_titanic_random
context._project_config.anonymous_usage_statistics = {
"enabled": False,
"data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c",
}
data_context_id = context.anonymous_usage_statistics["data_context_id"]
sites = (
site_builder_data_context_with_html_store_titanic_random._project_config_with_variables_substituted.data_docs_sites
)
local_site_config = sites["local_site"]
site_builder = instantiate_class_from_config(
config=local_site_config,
runtime_environment={
"data_context": context,
"root_directory": context.root_directory,
"site_name": "local_site",
},
config_defaults={
"module_name": "great_expectations.render.renderer.site_builder"
},
)
site_builder_return_obj = site_builder.build()
index_page_path = site_builder_return_obj[0]
links_dict = site_builder_return_obj[1]
expectation_suite_pages = [
file_relative_path(index_page_path, expectation_suite_link_dict["filepath"])
for expectation_suite_link_dict in links_dict["expectations_links"]
]
profiling_results_pages = [
file_relative_path(index_page_path, profiling_link_dict["filepath"])
for profiling_link_dict in links_dict["profiling_links"]
]
page_paths_to_check = (
[index_page_path] + expectation_suite_pages + profiling_results_pages
)
expected_logo_url = "https://great-expectations-web-assets.s3.us-east-2.amazonaws.com/logo-long.png?d=20190924T231836.000000Z"
for page_path in page_paths_to_check:
with open(page_path[7:]) as f:
page_contents = f.read()
assert expected_logo_url in page_contents
assert data_context_id not in | |
[]
temp.append(region.exterior.coords[index][1])
temp.append(region.exterior.coords[index][0])
region_coords.append(temp)
index += 1
# to plot point's coordinates over the map as polygon
region_plot = folium.Polygon(locations=region_coords,
color=region_color, popup=label)
region_map.add_child(region_plot)
def extract_barplot_info(day_length):
    """ To extract information for matplotlib plot

    Keyword Arguments:
        day_length {list} -- (day_name, total_length) pairs, one per day

    Returns:
        day, height, highest, highest_index, average
        -- day: three-letter day labels ("Mon", "Tue", ...)
        -- height: per-day total lengths rounded to 2 decimals
        -- highest / highest_index: the tallest bar and its position
        -- average: the weekly average repeated once per bar, used to draw a
           horizontal reference line in the plot
    """
    day = []
    height = []
    highest = 0
    highest_index = -1
    total = 0
    for index, row in enumerate(day_length):
        day.append(row[0][:3])  # "Monday" -> "Mon", etc.
        # total length for this day, rounded to 2 decimals
        track_length = round(row[1], 2)
        height.append(track_length)
        # track the tallest bar and where it sits
        if track_length > highest:
            highest = track_length
            highest_index = index
        total += track_length
    # NOTE: the divisor is a fixed 7 (a full week), preserving the original
    # behaviour even when fewer than 7 entries are supplied.
    average_value = total / 7
    # one copy of the average per bar, to plot a horizontal line
    average = [average_value] * len(day)
    return day, height, highest, highest_index, average
def spatioTemporalAggregation(df, field, summary, gridSize):
    """
    Aggregates the given field on hour and weekday basis.
    Prepares data for mosaic plot
    FOR THIS TO WORK YOU NEED TO INSTALL RTree or Rtree-linux!!!
    # TODO This function is poorly performing
    Parameters
    ----------
    df : geopandas dataframe
    field : string
        field to be summarized.
    summary : string
        type of summary to be sumarized. eg. min, max,sum, median
    gridSize : float
        the size of grid on same unit as geodataframe coordinates.
    Returns
    -------
    geodataframes: one each for larger grid and other for subgrids
    (for visualization purpose only)
    Aggregated grids with summary on it
    """
    def round_down(num, divisor):
        # snap num down to the nearest multiple of divisor
        return floor(num / divisor) * divisor
    def round_up(num, divisor):
        # snap num up to the nearest multiple of divisor
        return ceil(num / divisor) * divisor
    # Get crs from data
    sourceCRS = df.crs
    targetCRS = "epsg:3857"
    # Reproject to Web Mercator so gridSize is in projected units (metres)
    df = df.to_crs(targetCRS)
    # Get bounds
    xmin, ymin, xmax, ymax = df.total_bounds
    height, width = gridSize, gridSize
    # snap the bounding box outward onto the grid
    top, left = round_up(ymax, height), round_down(xmin, width)
    bottom, right = round_down(ymin, height), round_up(xmax, width)
    rows = int((top - bottom) / height)+1
    cols = int((right - left) / width)+1
    XleftOrigin = left
    XrightOrigin = left + width
    YtopOrigin = top
    YbottomOrigin = top - height
    # build one rectangle per grid cell, column by column, top to bottom
    polygons = []
    for i in range(cols):
        Ytop = YtopOrigin
        Ybottom = YbottomOrigin
        for j in range(rows):
            polygons.append(Polygon(
                [(XleftOrigin, Ytop), (XrightOrigin, Ytop),
                 (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]))
            Ytop = Ytop - height
            Ybottom = Ybottom - height
        XleftOrigin = XleftOrigin + width
        XrightOrigin = XrightOrigin + width
    grid = gpd.GeoDataFrame({'geometry': polygons})
    grid.crs = (targetCRS)
    # Assign gridid
    numGrid = len(grid)
    grid['gridId'] = list(range(numGrid))
    # Identify gridId for each point
    # NOTE(review): timestamps are parsed with a fixed '+00:00' suffix, so
    # 'time' values are assumed to be UTC ISO-8601 strings -- confirm upstream.
    df['hour'] = df['time'].apply(
        lambda x: datetime.datetime.strptime(
            x, '%Y-%m-%dT%H:%M:%S+00:00')).dt.hour
    df['weekday'] = df['time'].apply(
        lambda x: datetime.datetime.strptime(
            x, '%Y-%m-%dT%H:%M:%S+00:00')).dt.dayofweek
    # df['hour'] = pd.to_datetime(df['time']).dt.hour
    # df['weekday'] = pd.to_datetime(df['time']).dt.dayofweek
    # spatial join: tag every point with the id of the cell containing it
    # NOTE(review): 'op=' is deprecated in geopandas >= 0.10 ('predicate=')
    points_identified = gpd.sjoin(df, grid, op='within')
    # group points by gridid and calculate mean Easting,
    # store it as dataframe
    # delete if field already exists
    if field in grid.columns:
        del grid[field]
    # Aggregate by weekday, hour and grid
    grouped = points_identified.groupby(
        ['gridId', 'weekday', 'hour']).agg({field: [summary]})
    grouped = grouped.reset_index()
    # flatten the MultiIndex columns: ('gridId', '') -> 'gridId_', etc.
    grouped.columns = grouped.columns.map("_".join)
    modified_fieldname = field+"_"+summary
    # Create Subgrids: 24 rows (hours) x 7 columns (weekdays) per grid cell
    subgrid, mainGrid, rowNum, columnNum, value = [], [], [], [], []
    unikGrid = grouped['gridId_'].unique()
    print('running; wait till you see "finished"')
    for currentGrid in unikGrid:
        dataframe = grid[grid['gridId'] == currentGrid]
        xmin, ymin, xmax, ymax = dataframe.total_bounds
        # shrink the cell by 5% on every side to leave a visual margin
        xminn, xmaxx, yminn, ymaxx = xmin + \
            (xmax-xmin)*0.05, xmax-(xmax-xmin)*0.05, ymin + \
            (ymax-ymin)*0.05, ymax-(ymax-ymin)*0.05
        rowOffset = (ymaxx-yminn)/24.0
        colOffset = (xmaxx - xminn)/7.0
        tmp = (grouped['gridId_'] == currentGrid)
        for i in range(7):
            tmp2=(grouped['weekday_'] == i)
            for j in range(24):
                topy, bottomy, leftx, rightx = ymaxx-j*rowOffset, ymaxx - \
                    (j+1)*rowOffset, xminn+i * \
                    colOffset, xminn+(i+1)*colOffset
                subgrid.append(
                    Polygon([(leftx, topy), (rightx, topy),
                             (rightx, bottomy), (leftx, bottomy)]))
                mainGrid.append(currentGrid)
                rowNum.append(j)
                columnNum.append(i)
                # use the aggregated value when this (grid, weekday, hour)
                # combination has data; NaN otherwise (dropped below)
                if len(grouped[tmp
                               & tmp2
                               & (grouped['hour_'] == j)]) != 0:
                    this_value = grouped[
                        tmp
                        & tmp2
                        & (grouped['hour_'] == j)].iloc[0][
                        modified_fieldname]
                    value.append(this_value)
                else:
                    value.append(np.nan)
    subgrid_gpd = gpd.GeoDataFrame({'geometry': subgrid})
    subgrid_gpd.crs = targetCRS
    # Reproject back to the source CRS
    subgrid_gpd = subgrid_gpd.to_crs(sourceCRS)
    subgrid_gpd['gridId'] = mainGrid
    subgrid_gpd['Weekday'] = columnNum
    subgrid_gpd['hour'] = rowNum
    # composite key "<grid>_<weekday>_<hour>" uniquely labels each subcell
    subgrid_gpd['gridId'] = subgrid_gpd.apply(lambda x: str(
        x['gridId'])+"_"+str(x['Weekday'])+"_"+str(x['hour']), axis=1)
    subgrid_gpd[modified_fieldname] = value
    subgrid_gpd = subgrid_gpd.dropna()
    grid = grid.to_crs(sourceCRS)
    # keep only the outer cells that actually contain data
    grid = grid[grid['gridId'].isin(unikGrid)]
    print('finished')
    return grid, subgrid_gpd
# final_subgrid=subgrid_gpd[subgrid_gpd['value'].notnull()]
# return final_subgrid
#############################################################################################################################
def MosaicPlot(mainGrid, grid, field):
    """
    Performs spatio temporal aggregation of data on weekday and hour,
    and prepares mosaicplot.

    Side effects: writes "mainGrids.geojson", "grids.geojson" and
    "attributes.csv" to the current working directory.

    Parameters
    ----------
    mainGrid :polygon geodataframe
        The grid geodataframe with grid and aggregated data in a column.
        Grid shoud have grid id or equivalent unique ids
    grid: Small subgrids, prepared for visualization purpose
          only represents an hour of a weekday
    field : string
        Fieldname with aggregated data
    Returns
    -------
    m : folium map object
        Folium map with openstreetmap as base.
    """
    # Prepare for grid plotting using folium: folium tooltip/choropleth
    # field names cannot contain '.', so normalise them to '_'
    grid.columns = [cols.replace('.', '_') for cols in grid.columns]
    field = field.replace('.', '_')
    # Convert grid id to string so it matches the GeoJSON property keys
    grid['gridId'] = grid['gridId'].astype(str)
    # Convert maingrid,subgrid to geojson and csv
    mainGrid.to_file("mainGrids.geojson", driver='GeoJSON')
    atts = pd.DataFrame(grid)
    grid.to_file("grids.geojson", driver='GeoJSON')
    atts.to_csv("attributes.csv", index=False)
    # load spatial and non-spatial data
    # FIX: use context managers so the file handles are closed promptly
    # (the previous json.load(open(...)) calls leaked the handles)
    data_geojson_source = "grids.geojson"
    with open(data_geojson_source) as fh:
        data_geojson = json.load(fh)
    grid_geojson_source = "mainGrids.geojson"
    with open(grid_geojson_source) as fh:
        mainGrid_geojson = json.load(fh)
    # Get coordinates for map centre
    # NOTE(review): centroid on a geographic CRS is only approximate;
    # acceptable here because it is used just to centre the map.
    lat = grid.geometry.centroid.y.mean()
    lon = grid.geometry.centroid.x.mean()
    # Intialize a new folium map object
    m = folium.Map(location=[lat, lon],
                   zoom_start=10, tiles='Stamen Toner')
    # Outline layer for the main grids (transparent fill, teal border)
    folium.GeoJson(mainGrid_geojson,
                   lambda feature: {'lineOpacity': 0.4,
                                    'color': '#00ddbb',
                                    'fillColor': None,
                                    'weight': 2,
                                    'fillOpacity': 0}).add_to(m)
    # add attribute data
    attribute_pd = pd.read_csv("attributes.csv")
    attribute = pd.DataFrame(attribute_pd)
    # Convert gridId to string to ensure it matches with gridId
    attribute['gridId'] = attribute['gridId'].astype(str)
    # construct color map spanning the observed value range
    minvalue = attribute[field].min()
    maxvalue = attribute[field].max()
    colormap_rn = linear.YlOrRd_09.scale(minvalue, maxvalue)
    # Series mapping gridId -> field value, used by the style callback
    population_dict_rn = attribute.set_index('gridId')[field]
    # create the choropleth layer for the sub-grids
    folium.GeoJson(
        data_geojson,
        name='Choropleth map',
        style_function=lambda feature: {
            'lineOpacity': 0,
            'color': 'green',
            'fillColor': colormap_rn(population_dict_rn[
                feature['properties']['gridId']]),
            'weight': 0,
            'fillOpacity': 0.9
        },
        highlight_function=lambda feature: {
            'weight': 3, 'color': 'black', 'fillOpacity': 1},
        tooltip=folium.features.GeoJsonTooltip(fields=['Weekday', 'hour',
                                                       field])).add_to(m)
    # format legend label (underscores back to spaces for readability)
    field = field.replace("_", " ")
    # add a legend
    colormap_rn.caption = '{value} per grid by weekday and hour'.format(
        value=field)
    colormap_rn.add_to(m)
    # add a layer control
    folium.LayerControl().add_to(m)
    return m
# Aggregate data by weekday and hour
def aggregateByGrid(df, field, summary, gridSize):
"""
Aggregates the specified field with chosen summary type and user
defined grid size. returns aggregated grids with summary
Parameters
----------
df : geopandas dataframe
field : string
field to be summarized.
summary : string
type of summary to be sumarized. eg. min, max,sum, median
gridSize : float
the size of grid on same unit as geodataframe coordinates.
Returns
-------
geodataframe
Aggregated grids with summary on it
"""
def round_down(num, divisor):
return floor(num / divisor) * divisor
def round_up(num, divisor):
return ceil(num / divisor) * divisor
# Get crs from data
sourceCRS = df.crs
targetCRS = "EPSG:3857"
# Reproject to Mercator\
df = df.to_crs(targetCRS)
# Get bounds
xmin, ymin, xmax, ymax = df.total_bounds
print(xmin, ymin, xmax, ymax)
height, width = gridSize, gridSize
top, left = round_up(ymax, height), round_down(xmin, width)
bottom, right = round_down(ymin, height), round_up(xmax, width)
rows = int((top - bottom) / height)+1
cols = int((right - left) / width)+1
XleftOrigin = left
XrightOrigin = left + width
YtopOrigin = top
YbottomOrigin = top - height
polygons = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom = YbottomOrigin
for j in range(rows):
polygons.append(Polygon([(XleftOrigin, Ytop),
(XrightOrigin, Ytop),
(XrightOrigin, Ybottom),
(XleftOrigin, Ybottom)]))
Ytop = Ytop - height
Ybottom = Ybottom - height
XleftOrigin = XleftOrigin + width
XrightOrigin = XrightOrigin + width
grid = gpd.GeoDataFrame({'geometry': polygons})
grid.crs = df.crs
# Assign gridid
numGrid = len(grid)
grid['gridId'] = list(range(numGrid))
# Identify gridId for each point
points_identified | |
'verbose_name_plural': '09 Gedung Sosial',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanTebingTinggi',
fields=[
],
options={
'verbose_name': '38 Gedung Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '38 Gedung Tebing Tinggi',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapus',
fields=[
],
options={
'verbose_name': 'Gedung Bangunan Usul Hapus',
'proxy': True,
'verbose_name_plural': 'Gedung Bangunan Usul Hapus',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusAwayan',
fields=[
],
options={
'verbose_name': '34 Gedung Usul Hapus Awayan',
'proxy': True,
'verbose_name_plural': '34 Gedung Usul Hapus Awayan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusBAPPEDA',
fields=[
],
options={
'verbose_name': '21 Gedung Usul Hapus BAPPEDA',
'proxy': True,
'verbose_name_plural': '21 Gedung Usul Hapus BAPPEDA',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusBatumandi',
fields=[
],
options={
'verbose_name': '32 Gedung Usul Hapus Batumandi',
'proxy': True,
'verbose_name_plural': '32 Gedung Usul Hapus Batumandi',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusBatuPiring',
fields=[
],
options={
'verbose_name': '37 Gedung Usul Hapus Batu Piring',
'proxy': True,
'verbose_name_plural': '37 Gedung Usul Hapus Batu Piring',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusBKD',
fields=[
],
options={
'verbose_name': '19 Gedung Usul Hapus BKD',
'proxy': True,
'verbose_name_plural': '19 Gedung Usul Hapus BKD',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusBKPPD',
fields=[
],
options={
'verbose_name': '26 Gedung Usul Hapus BKPPD',
'proxy': True,
'verbose_name_plural': '26 Gedung Usul Hapus BKPPD',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusBPBD',
fields=[
],
options={
'verbose_name': '39 Gedung Usul Hapus BPBD',
'proxy': True,
'verbose_name_plural': '39 Gedung Usul Hapus BPBD',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusBPPD',
fields=[
],
options={
'verbose_name': '48 Gedung Usul Hapus BPPD',
'proxy': True,
'verbose_name_plural': '48 Gedung Usul Hapus BPPD',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDinkes',
fields=[
],
options={
'verbose_name': '05 Gedung Usul Hapus Dinkes',
'proxy': True,
'verbose_name_plural': '05 Gedung Usul Hapus Dinkes',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDisdik',
fields=[
],
options={
'verbose_name': '07 Gedung Usul Hapus Disdik',
'proxy': True,
'verbose_name_plural': '07 Gedung Usul Hapus Disdik',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDishub',
fields=[
],
options={
'verbose_name': '04 Gedung Usul Hapus Dishub',
'proxy': True,
'verbose_name_plural': '04 Gedung Usul Hapus Dishub',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDisnakertrans',
fields=[
],
options={
'verbose_name': '41 Gedung Usul Hapus Disnakertrans',
'proxy': True,
'verbose_name_plural': '41 Gedung Usul Hapus Disnakertrans',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDistamben',
fields=[
],
options={
'verbose_name': '17 Gedung Usul Hapus Distamben',
'proxy': True,
'verbose_name_plural': '17 Gedung Usul Hapus Distamben',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDKO',
fields=[
],
options={
'verbose_name': '23 Gedung Usul Hapus DKO',
'proxy': True,
'verbose_name_plural': '23 Gedung Usul Hapus DKO',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDKP',
fields=[
],
options={
'verbose_name': '15 Gedung Usul Hapus DKP',
'proxy': True,
'verbose_name_plural': '15 Gedung Usul Hapus DKP',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDKUKMP',
fields=[
],
options={
'verbose_name': '16 Gedung Usul Hapus DKUKMP',
'proxy': True,
'verbose_name_plural': '16 Gedung Usul Hapus DKUKMP',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDLH',
fields=[
],
options={
'verbose_name': '22 Gedung Usul Hapus DLH',
'proxy': True,
'verbose_name_plural': '22 Gedung Usul Hapus DLH',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDPKP',
fields=[
],
options={
'verbose_name': '40 Gedung Usul Hapus DPKP',
'proxy': True,
'verbose_name_plural': '40 Gedung Usul Hapus DPKP',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDPMD',
fields=[
],
options={
'verbose_name': '10 Gedung Usul Hapus DPMD',
'proxy': True,
'verbose_name_plural': '10 Gedung Usul Hapus DPMD',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDPMPTSP',
fields=[
],
options={
'verbose_name': '18 Gedung Usul Hapus DPMPTSP',
'proxy': True,
'verbose_name_plural': '18 Gedung Usul Hapus DPMPTSP',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDPPKB',
fields=[
],
options={
'verbose_name': '42 Gedung Usul Hapus DPPKB',
'proxy': True,
'verbose_name_plural': '42 Gedung Usul Hapus DPPKB',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDPPPA',
fields=[
],
options={
'verbose_name': '11 Gedung Usul Hapus DPPPA',
'proxy': True,
'verbose_name_plural': '11 Gedung Usul Hapus DPPPA',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDPUPR',
fields=[
],
options={
'verbose_name': '03 Gedung Usul Hapus DPUPR',
'proxy': True,
'verbose_name_plural': '03 Gedung Usul Hapus DPUPR',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusDukCatPil',
fields=[
],
options={
'verbose_name': '12 Gedung Usul Hapus DukCatPil',
'proxy': True,
'verbose_name_plural': '12 Gedung Usul Hapus DukCatPil',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusHalong',
fields=[
],
options={
'verbose_name': '35 Gedung Usul Hapus Halong',
'proxy': True,
'verbose_name_plural': '35 Gedung Usul Hapus Halong',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusInspektorat',
fields=[
],
options={
'verbose_name': '20 Gedung Usul Hapus Inspektorat',
'proxy': True,
'verbose_name_plural': '20 Gedung Usul Hapus Inspektorat',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusJuai',
fields=[
],
options={
'verbose_name': '33 Gedung Usul Hapus Juai',
'proxy': True,
'verbose_name_plural': '33 Gedung Usul Hapus Juai',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusKearsipan',
fields=[
],
options={
'verbose_name': '44 Gedung Usul Hapus Kearsipan',
'proxy': True,
'verbose_name_plural': '44 Gedung Usul Hapus Kearsipan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusKehutanan',
fields=[
],
options={
'verbose_name': '14 Gedung Usul Hapus Kehutanan',
'proxy': True,
'verbose_name_plural': '14 Gedung Usul Hapus Kehutanan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusKESBANGPOL',
fields=[
],
options={
'verbose_name': '24 Gedung Usul Hapus KESBANGPOL',
'proxy': True,
'verbose_name_plural': '24 Gedung Usul Hapus KESBANGPOL',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusKominfo',
fields=[
],
options={
'verbose_name': '43 Gedung Usul Hapus Kominfo',
'proxy': True,
'verbose_name_plural': '43 Gedung Usul Hapus Kominfo',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusLampihong',
fields=[
],
options={
'verbose_name': '31 Gedung Usul Hapus Lampihong',
'proxy': True,
'verbose_name_plural': '31 Gedung Usul Hapus Lampihong',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusParingin',
fields=[
],
options={
'verbose_name': '28 Gedung Usul Hapus Paringin',
'proxy': True,
'verbose_name_plural': '28 Gedung Usul Hapus Paringin',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusParinginKota',
fields=[
],
options={
'verbose_name': '29 Gedung Usul Hapus Paringin Kota',
'proxy': True,
'verbose_name_plural': '29 Gedung Usul Hapus Paringin Kota',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusParinginSelatan',
fields=[
],
options={
'verbose_name': '36 Gedung Usul Hapus Paringin Selatan',
'proxy': True,
'verbose_name_plural': '36 Gedung Usul Hapus Paringin Selatan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusParinginTimur',
fields=[
],
options={
'verbose_name': '30 Gedung Usul Hapus Paringin Timur',
'proxy': True,
'verbose_name_plural': '30 Gedung Usul Hapus Paringin Timur',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusPariwisata',
fields=[
],
options={
'verbose_name': '46 Gedung Usul Hapus Pariwisata',
'proxy': True,
'verbose_name_plural': '46 Gedung Usul Hapus Pariwisata',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusPerdagangan',
fields=[
],
options={
'verbose_name': '47 Gedung Usul Hapus Perdagangan',
'proxy': True,
'verbose_name_plural': '47 Gedung Usul Hapus Perdagangan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusPerikanan',
fields=[
],
options={
'verbose_name': '45 Gedung Usul Hapus Perikanan',
'proxy': True,
'verbose_name_plural': '45 Gedung Usul Hapus Perikanan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusPerpustakaan',
fields=[
],
options={
'verbose_name': '08 Gedung Usul Hapus Perpustakaan',
'proxy': True,
'verbose_name_plural': '08 Gedung Usul Hapus Perpustakaan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusPertanian',
fields=[
],
options={
'verbose_name': '13 Gedung Usul Hapus Pertanian',
'proxy': True,
'verbose_name_plural': '13 Gedung Usul Hapus Pertanian',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusRSUD',
fields=[
],
options={
'verbose_name': '06 Gedung Usul Hapus RSUD',
'proxy': True,
'verbose_name_plural': '06 Gedung Usul Hapus RSUD',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusSATPOLPP',
fields=[
],
options={
'verbose_name': '25 Gedung Usul Hapus SATPOLPP',
'proxy': True,
'verbose_name_plural': '25 Gedung Usul Hapus SATPOLPP',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusSekretariatKorpri',
fields=[
],
options={
'verbose_name': '27 Gedung Usul Hapus Sekretariat Korpri',
'proxy': True,
'verbose_name_plural': '27 Gedung Usul Hapus Sekretariat Korpri',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusSetda',
fields=[
],
options={
'verbose_name': '02 Gedung Usul Hapus Setda',
'proxy': True,
'verbose_name_plural': '02 Gedung Usul Hapus Setda',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusSetwan',
fields=[
],
options={
'verbose_name': '01 Gedung Usul Hapus Setwan',
'proxy': True,
'verbose_name_plural': '01 Gedung Usul Hapus Setwan',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusSosial',
fields=[
],
options={
'verbose_name': '09 Gedung Usul Hapus Sosial',
'proxy': True,
'verbose_name_plural': '09 Gedung Usul Hapus Sosial',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='GedungBangunanUsulHapusTebingTinggi',
fields=[
],
options={
'verbose_name': '38 Gedung Usul Hapus Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '38 Gedung Usul Hapus Tebing Tinggi',
},
bases=('gedungbangunan.gedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanAwayan',
fields=[
],
options={
'verbose_name': '34 Harga Gedung Awayan',
'proxy': True,
'verbose_name_plural': '34 Harga Gedung Awayan',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanBAPPEDA',
fields=[
],
options={
'verbose_name': '21 Harga Gedung BAPPEDA',
'proxy': True,
'verbose_name_plural': '21 Harga Gedung BAPPEDA',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanBatumandi',
fields=[
],
options={
'verbose_name': '32 Harga Gedung Batumandi',
'proxy': True,
'verbose_name_plural': '32 Harga Gedung Batumandi',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanBatuPiring',
fields=[
],
options={
'verbose_name': '37 Harga Gedung Batu Piring',
'proxy': True,
'verbose_name_plural': '37 Harga Gedung Batu Piring',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanBKD',
fields=[
],
options={
'verbose_name': '19 Harga Gedung BKD',
'proxy': True,
'verbose_name_plural': '19 Harga Gedung BKD',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanBKPPD',
fields=[
],
options={
'verbose_name': '26 Harga Gedung BKPPD',
'proxy': True,
'verbose_name_plural': '26 Harga Gedung BKPPD',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanBPBD',
fields=[
],
options={
'verbose_name': '39 Harga Gedung BPBD',
'proxy': True,
'verbose_name_plural': '39 Harga Gedung BPBD',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanBPPD',
fields=[
],
options={
'verbose_name': '48 Harga Gedung BPPD',
'proxy': True,
'verbose_name_plural': '48 Harga Gedung BPPD',
},
bases=('gedungbangunan.hargagedungbangunan',),
),
migrations.CreateModel(
name='HargaGedungBangunanDinkes',
fields=[
| |
@param arraylen: many bools will there be? FIX: handle variable
@type unavailable: bool or None
@param unavailable: the default value to use if none given (if not None)
@type bv: str
@param bv: BitVector containing the incoming data
@type dataDict: str
@param dataDict: dictionary in which to place the results
@type decodeOnly: bool
@param decodeOnly: Set to true to only get the code for decoding
@rtype: int
@return: index one past the end of where this read
'''
assert(type=='bool')
if verbose: print type,'decode',name,': unvail=',unavailable,' numbits:',numbits, ' startindex=',startindex
#int(startindex); int(numbits) # Make sure it is a number
assert numbits==1
assert arraylen == 1 # FIX... handle arrays
if None != required:
assert type(required)==bool
if not decodeOnly: o.write(' '+dataDict+'[\''+name+'\']=')
if required: o.write('True\n')
else: o.write('False\n')
if not decodeOnly: o.write('\n')
return int(startindex)+int(numbits)
if not decodeOnly: o.write(' '+dataDict+'[\''+name+'\']=')
o.write('bool(int('+bv+'['+str(startindex)+':'+str(startindex+int(numbits)*int(arraylen))+']))')
if not decodeOnly: o.write('\n')
return int(startindex)+int(numbits)
def decodeUInt(o,name,type,startindex,numbits,required=None,arraylen=1,unavailable=None,
               bv='bv',dataDict='r',verbose=False, decodeOnly=False):
    '''
    Build the decoder for unsigned integer variables

    @type o: file like obj
    @param o: where write the code
    @type name: str
    @param name: field name
    @type type: str
    @param type: uint, etc.
    @type startindex: int
    @param startindex: bit that begins the uint(s)
    @type numbits: int >= 1
    @param numbits: How many bits per unit datum
    @type required: int or None
    @param required: If not None, then the value must be set to this.
    @type arraylen: int >= 1
    @param arraylen: many ints will there be? FIX: handle variable
    @type unavailable: int or None
    @param unavailable: the default value to use if none given (if not None)
    @type bv: str
    @param bv: BitVector containing the incoming data
    @type dataDict: str
    @param dataDict: dictionary in which to place the results
    @type decodeOnly: bool
    @param decodeOnly: Set to true to only get the code for decoding
    @rtype: int
    @return: index one past the end of where this read
    '''
    # FIX: print() form is valid on both Python 2 and 3 (single-argument
    # call, so the emitted text is identical under both)
    if verbose:
        print('%s decode %s : unvail= %s  numbits: %s  startindex= %s'
              % (type, name, unavailable, numbits, startindex))
    if arraylen is None:
        arraylen = 1
    assert arraylen == 1  # FIX... handle arrays
    assert numbits >= 1
    if not decodeOnly:
        # When emitting a full assignment statement, suppress the extra
        # verbose newline written at the end of this function.
        verbose = False
    if required is not None:
        int(required)  # raises if the required value is not a number
        if not decodeOnly:
            o.write(' '+dataDict+'[\''+name+'\']=')
        o.write(str(required))
        if not decodeOnly:
            o.write('\n')
        return startindex+numbits
    if not decodeOnly:
        o.write(' '+dataDict+'[\''+name+'\']=')
    o.write('int('+bv+'['+str(startindex)+':'+str(startindex+int(numbits)*int(arraylen))+'])')
    if not decodeOnly:
        o.write('\n')
    if verbose:
        o.write('\n')
    return startindex+numbits
def decodeInt(o,name,type,startindex,numbits,required=None,arraylen=1,unavailable=None,
              bv='bv',dataDict='r',verbose=False, decodeOnly=False):
    '''
    Build the decoder for signed integer variables

    (FIX: docstring previously said "unsigned" — this emits
    binary.signedIntFromBV, the signed decoder.)

    @type o: file like obj
    @param o: where write the code
    @type name: str
    @param name: field name
    @type type: str
    @param type: int
    @type startindex: int
    @param startindex: bit that begins the int(s)
    @type numbits: int >= 1
    @param numbits: How many bits per unit datum
    @type required: int or None
    @param required: If not None, then the value must be set to this.
    @type arraylen: int >= 1
    @param arraylen: many ints will there be? FIX: handle variable
    @type unavailable: int or None
    @param unavailable: the default value to use if none given (if not None)
    @type bv: str
    @param bv: BitVector containing the incoming data
    @type dataDict: str
    @param dataDict: dictionary in which to place the results
    @type decodeOnly: bool
    @param decodeOnly: Set to true to only get the code for decoding
    @rtype: int
    @return: index one past the end of where this read
    '''
    assert type=='int'
    # FIX: print() form is valid on both Python 2 and 3; output unchanged
    if verbose:
        print('%s decode %s : unvail= %s  numbits: %s  startindex= %s'
              % (type, name, unavailable, numbits, startindex))
    if arraylen is None:
        arraylen = 1
    end = startindex+int(numbits)*int(arraylen)
    assert arraylen == 1  # FIX: handle arrays
    assert numbits >= 1
    if required is not None:
        int(required)  # raises if the required value is not a number
        if not decodeOnly:
            o.write(' '+dataDict+'[\''+name+'\']=')
        o.write(str(required))
        if not decodeOnly:
            o.write('\n')
        return end
    if not decodeOnly:
        o.write(' '+dataDict+'[\''+name+'\']=')
    o.write('binary.signedIntFromBV('+bv+'['+str(startindex)+':'+str(end)+'])')
    if not decodeOnly:
        o.write('\n')
    if verbose:
        o.write('\n')
    return end
def decodeFloat(o,name,type,startindex,numbits,required=None,arraylen=1,unavailable=None,
                bv='bv',dataDict='r',verbose=False, decodeOnly=False):
    '''
    Build the decoder for IEEE float variables

    @type o: file like obj
    @param o: where write the code
    @type name: str
    @param name: field name
    @type type: str
    @param type: float
    @type startindex: int
    @param startindex: bit that begins the float(s)
    @type numbits: int >= 1
    @param numbits: How many bits per unit datum
    @type required: float or None
    @param required: If not None, then the value must be set to this.
    @type arraylen: int >= 1
    @param arraylen: many floats will there be? FIX: handle variable
    @type unavailable: float or None
    @param unavailable: the default value to use if none given (if not None)
    @type bv: str
    @param bv: BitVector containing the incoming data
    @type dataDict: str
    @param dataDict: dictionary in which to place the results
    @type decodeOnly: bool
    @param decodeOnly: Set to true to only get the code for decoding
    @rtype: int
    @return: index one past the end of where this read
    '''
    assert type=='float'
    # FIX: print() form is valid on both Python 2 and 3; output unchanged
    if verbose:
        print('%s decode %s : unvail= %s  numbits: %s  startindex= %s'
              % (type, name, unavailable, numbits, startindex))
    if arraylen is None:
        arraylen = 1
    end = startindex+int(numbits)*int(arraylen)
    assert arraylen == 1  # FIX... handle arrays
    assert numbits >= 1
    if required is not None:
        float(required)  # raises if the required value is not a number
        if not decodeOnly:
            o.write(' '+dataDict+'[\''+name+'\']=')
        o.write(str(required))
        if not decodeOnly:
            o.write('\n')
        if verbose:
            o.write('\n')
        return end
    if not decodeOnly:
        o.write(' '+dataDict+'[\''+name+'\']=')
    o.write('binary.bitvec2float('+bv+'['+str(startindex)+':'+str(end)+'])')
    if not decodeOnly:
        o.write('\n')
    return end
def decodeAisstr6(o,name,type,startindex,numbits,required=None,arraylen=1,unavailable=None,
                  bv='bv',dataDict='r',verbose=False, decodeOnly=False):
    '''
    Build the decoder for aisstr6 variables. Generally arrays.
    @bug: FIX: validate strings??
    @type o: file like obj
    @param o: where write the code
    @type name: str
    @param name: field name
    @type type: str
    @param type: 'aisstr6'
    @type startindex: int
    @param startindex: bit that begins the string
    @type numbits: int >= 1
    @param numbits: How many bits per character (6 for aisstr6)
    @type required: restricted str or None
    @param required: If not None, then the value must be set to this.
    @type arraylen: int >= 1
    @param arraylen: how many characters will there be
    @type unavailable: restricted str or None
    @param unavailable: the default value to use if none given (if not None)
    @type bv: str
    @param bv: BitVector containing the incoming data
    @type dataDict: str
    @param dataDict: dictionary in which to place the results
    @type decodeOnly: bool
    @param decodeOnly: Set to true to only get the code for decoding
    @rtype: int
    @return: index one past the end of where this read
    '''
    assert type=='aisstr6'
    # FIX: print() form is valid on both Python 2 and 3; output unchanged
    if verbose:
        print('%s decode %s : unvail= %s  numbits: %s  startindex= %s'
              % (type, name, unavailable, numbits, startindex))
    if arraylen is None:
        arraylen = 1
    end = startindex+int(numbits)*int(arraylen)
    assert arraylen >= 1
    assert numbits >= 1
    if required is not None:
        # FIX: 'required' is a string for aisstr6 fields.  The old code
        # called float(required) (a copy/paste from decodeFloat, which
        # raised ValueError for any non-numeric string) and then emitted
        # the string unquoted, producing invalid generated code.  Emit a
        # proper quoted string literal instead.
        if not decodeOnly:
            o.write(' '+dataDict+'[\''+name+'\']=')
        o.write(repr(required))
        if not decodeOnly:
            o.write('\n')
        return end
    if not decodeOnly:
        o.write(' '+dataDict+'[\''+name+'\']=')
    o.write('aisstring.decode('+bv+'['+str(startindex)+':'+str(end)+'])')
    if not decodeOnly:
        o.write('\n')
    return end
def decodeDecimal(o,name,type,startindex,numbits,required=None,arraylen=1,unavailable=None,
bv='bv',dataDict='r',verbose=False,scale=None, decodeOnly=False,offset=None):
'''
Build the decoder for signed decimal variables
@type o: file like obj
@param o: where write the code
@type name: str
@param name: field name
@type type: str
@param type: 'decimal'
@type startindex: int
@param startindex: bit that begins the int(s)
@type numbits: int >= 1
@param numbits: How many bits per unit datum
@type required: Decimal or None
@param required: If not None, then the value must be set to this.
@type arraylen: int >= 1
@param arraylen: many ints will there be? FIX: handle variable
@type unavailable: Decimal or None
@param unavailable: the default value to use if none given (if not None)
@type bv: str
@param bv: BitVector containing the incoming data
@type dataDict: str
@param dataDict: dictionary in which to place the results
@type decodeOnly: bool
@param decodeOnly: Set to true to only get the code for decoding
@rtype: int
@return: index one past the end of where this read
'''
assert type=='decimal'
if verbose: print type,'decode',name,': unvail=',unavailable,' numbits:',numbits, ' startindex=',startindex
if None==arraylen: arraylen=1
end = startindex+int(numbits)*int(arraylen)
assert arraylen == 1 # FIX... handle arrays
assert numbits>=1 and numbits <= 32
if None == scale: scale='1' # Warning about this was in the encode section
if None != required:
Decimal(required) # Make sure required is a number
if not decodeOnly: o.write(' | |
import argparse
import logging
import os
import re
import sys
import traceback
from datetime import datetime
from pathlib import Path
from .bamCLIP import bamCLIP
from .countCLIP import countCLIP
from .createMatrix import MatrixConverter
from .gffCLIP import FeatureOrderException, gffCLIP
'''
--------------------------------------------------
htseq-clip main
Authors: <NAME>, <EMAIL>
<NAME>, <EMAIL>
<NAME>, <EMAIL>
Modified by: <NAME>, <EMAIL>
Institution: EMBL Heidelberg
Date: October 2015
--------------------------------------------------
'''
def _annotation(args):
    '''
    Flatten annotations from the given GFF file.
    Falls back to the (memory hungry) unsorted mode when the input
    turns out not to be feature-ordered.
    '''
    logging.info('Parsing annotations')
    logging.info('GFF file {}, output file {}'.format(args.gff,args.output))
    parser = gffCLIP(args)
    try:
        parser.process(args.unsorted)
    except FeatureOrderException as exc:
        # Already asked for unsorted parsing: nothing more we can do.
        if args.unsorted:
            raise exc
        logging.warning(str(exc))
        logging.warning('Trying to parse {} with "--unsorted" option.'.format(args.gff))
        logging.warning('This step is memory hungry')
        parser.process(True)
def _createSlidingWindows(args):
    '''
    Create sliding windows from the given annotation file.
    '''
    logging.info('Create sliding windows')
    logging.info('input file {}, output file {}'.format(args.input,args.output))
    logging.info('Window size {} step size {}'.format(args.windowSize,args.windowStep))
    windower = gffCLIP(args)
    windower.slidingWindow(args.input)
def _mapToId(args):
    '''
    Map entries in the "name" column of the annotation to unique ids
    and write them in tab separated format.
    '''
    logging.info('Creating mapping file from annotations')
    logging.info('Input file {} output file {}'.format(args.annotation,args.output))
    converter = countCLIP(args)
    converter.annotationToIDs()
def _extract(args):
    '''
    Extract cross-link sites.
    Dispatches on args.choice to the matching bamCLIP extraction method;
    an unrecognised choice is silently ignored (as before).
    '''
    # choice -> (site label, bamCLIP method name, accepts offset?)
    dispatch = {
        's': ('start', 'extract_start_sites', True),
        'i': ('insertion', 'extract_insertion_sites', False),
        'd': ('deletion', 'extract_deletion_sites', False),
        'm': ('middle', 'extract_middle_sites', False),
        'e': ('end', 'extract_end_sites', True),
    }
    if args.choice not in dispatch:
        return
    label, method, takes_offset = dispatch[args.choice]
    logging.info('Extracting {} sites'.format(label))
    if takes_offset:
        logging.info('Bam file : {}, output file: {}, offset: {}'.format(args.input,args.output,args.offset))
    else:
        logging.info('Bam file : {}, output file: {}'.format(args.input,args.output))
    with bamCLIP(args) as bh:
        if takes_offset:
            getattr(bh, method)(offset = args.offset)
        else:
            getattr(bh, method)()
def _count(args):
    '''
    Count crosslink sites per sliding window

    Validates the user supplied temp directory (when file copying is
    requested) and delegates counting to countCLIP.
    '''
    logging.info('Count crosslink sites')
    logging.info('Annotation file {} crosslink sites file {} output file {}'.format(args.annotation,args.input,args.output))
    # sanity check temp dir exists (only relevant when files are copied there)
    if args.cpTmp and args.tmp is not None:
        tmpAbs = Path(args.tmp).absolute()
        if not tmpAbs.exists():
            raise RuntimeError("Folder {} given under '--tmp' parameter does not exists!".format(str(tmpAbs)))
        # not all necessary but for completeness: normalize to absolute path
        args.tmp = str(tmpAbs)
    countC = countCLIP(args)
    # counting is strand specific unless --unstranded was given
    countC.count(not args.unstranded)
def _countMatrix(args):
    """Collect per-sample count files from a folder into one R friendly matrix."""
    logging.info('Generate matrix from files')
    logging.info('Input folder {}, output file {}'.format(args.input, args.output))
    converter = MatrixConverter(args.input, args.prefix, args.postfix, args.output)
    converter.read_samples()
    converter.write_matrix()
# Module-level root logger; level and handlers are configured in main()
logger = logging.getLogger()
def main():
prog = 'htseq-clip'
description = '''
{0}: A flexible toolset for the analysis of iCLIP and eCLIP sequencing data
The function (as a positional argument) should be one of:
[Annotation]
annotation flattens a gff formatted annotation file
createSlidingWindows creates sliding windows based on given annotation file
mapToId map entries in "name" column to unique ids and write in tab separated format
[Extraction]
extract extracts crosslink sites, insertions or deletions
[Counting]
count count sites in annotation
[Helpers]
createMatrix create R friendly matrix from count function output files
'''.format(prog)
epilog = "For command line options of each argument, use: {} <positional argument> -h".format(prog)
parser = argparse.ArgumentParser(prog=prog, description=description,epilog=epilog,formatter_class=argparse.RawDescriptionHelpFormatter)
# log levels
loglevels = ['debug','info','warn','quiet']
# subparsers
subps = parser.add_subparsers(help='Need positional arguments',dest='subparser')
''' ____________________ [Annotation] ___________________ '''
# annotation
ahelp = 'annotation: flattens (to BED format) the given annotation file (in GFF format)'
annotation = subps.add_parser('annotation',description=ahelp, formatter_class=argparse.RawTextHelpFormatter) # help='flatten annotation',
annotation.add_argument('-g','--gff',metavar='annotation',dest='gff',help='GFF formatted annotation file, supports gzipped (.gz) files',required=True)
annotation.add_argument('-o','--output',metavar = 'output file',dest='output',help='output file (.bed[.gz], default: print to console)',default=None,type=str)
annotation.add_argument('-u','--geneid',metavar='gene id',dest='id',help='Gene id attribute in GFF file (default: gene_id for gencode gff files)',default='gene_id',type=str)
annotation.add_argument('-n','--genename',metavar='gene name',dest='name',help='Gene name attribute in GFF file (default: gene_name for gencode gff files)',default='gene_name',type=str)
annotation.add_argument('-t','--genetype',metavar='gene type',dest='type',help='Gene type attribute in GFF file (default: gene_type for gencode gff files)',default='gene_type',type=str)
annotation.add_argument('--splitExons',dest='splitExons',help='use this flag to split exons into exonic features such as 5\'UTR, CDS and 3\' UTR',action='store_true')
annotation.add_argument('--unsorted',dest='unsorted',help='use this flag if the GFF file is unsorted',action='store_true')
annotation.add_argument('-v','--verbose',metavar='Verbose level',dest='log',help='Allowed choices: '+', '.join(loglevels)+' (default: info)',choices=loglevels,default='info')
# createSlidingWindows
cshelp = 'createSlidingWindows: creates sliding windows out of the flattened annotation file'
createSlidingWindows = subps.add_parser('createSlidingWindows',description=cshelp, formatter_class=argparse.RawTextHelpFormatter) # help='create sliding windows',
createSlidingWindows.add_argument('-i','--input',metavar='input file',dest='input',help='flattend annotation file, see "{} annotation -h"'.format(prog),required=True)
createSlidingWindows.add_argument('-o','--output',metavar = 'output file',dest='output',help='annotation sliding windows file (.bed[.gz], default: print to console)',default=None,type=str)
createSlidingWindows.add_argument('-w','--windowSize',metavar = 'window size',dest='windowSize',help='window size (in number of base pairs) for sliding window (default: 50)',default=50,type=int)
createSlidingWindows.add_argument('-s','--windowStep',metavar = 'step size',dest='windowStep',help='window step size for sliding window (default: 20)',default=20,type=int)
createSlidingWindows.add_argument('-v','--verbose',metavar='Verbose level',dest='log',help='Allowed choices: '+', '.join(loglevels)+' (default: info)',choices=loglevels,default='info')
# mapToIds
maphelp = 'mapToId: extract "name" column from the annotation file and map the entries to unique id and print out in tab separated format'
mapToId = subps.add_parser('mapToId',description=maphelp, formatter_class = argparse.RawTextHelpFormatter)
mapToId.add_argument('-a','--annotation',metavar= 'annotation file', help = 'flattened annotation file from "{0} annotation -h" or sliding window file from "{0} createSlidingWindows -h"'.format(prog),required=True)
mapToId.add_argument('-o','--output',metavar = 'output file',dest='output',help='region/window annotation mapped to a unique id (.txt[.gz], default: print to console)',default=None,type=str)
mapToId.add_argument('-v','--verbose',metavar='Verbose level',dest='log',help='Allowed choices: '+', '.join(loglevels)+' (default: info)',choices=loglevels,default='info')
''' ____________________ [Extraction] ___________________ '''
# extract
ehelp = 'extract: extracts crosslink sites, insertions or deletions'
echoices = ['s','i','d','m','e']
mates = [1,2]
extract = subps.add_parser('extract',description=ehelp,formatter_class=argparse.RawTextHelpFormatter) #,help='extract crosslinks'
extract.add_argument('-i','--input', metavar='input file',dest='input',help='input file (.bam, MUST be co-ordinate sorted and indexed)',required=True)
extract.add_argument('-o','--output', metavar = 'output file',dest='output',help='output file (.bed, default: print to console)',default=None,type=str)
extract.add_argument('-e','--mate', dest='mate',help='for paired end sequencing, select the read/mate to extract the crosslink sites from.\n Must be one of: {}'.format(', '.join([str(i) for i in mates])),type=int,choices=mates,required=True) # make it required ?
extract.add_argument('-s','--site',dest='choice',
help='Crosslink site choices, must be one of: {0}\n s: start site \n i: insertion site \n d: deletion site \n m: middle site \n e: end site (default: e).'.format(', '.join(echoices)),choices=echoices,default='e')
extract.add_argument('-g','--offset',metavar='offset length',dest='offset',help='Number of nucleotides to offset for crosslink sites (default: 0)',type=int,default=0)
extract.add_argument('--ignore',dest='ignore',help='flag to ignore crosslink sites outside of genome',action='store_true')
extract.add_argument('-q','--minAlignmentQuality',metavar = 'min. alignment quality',dest='minAlignmentQuality',help='minimum alignment quality (default: 10)',type=int,default=10)
extract.add_argument('-m','--minReadLength',metavar='min. read length',dest='minReadLength',help='minimum read length (default: 0)',type=int,default=0)
extract.add_argument('-x','--maxReadLength',metavar='max. read length',dest='maxReadLength',help='maximum read length (default: 500)',type=int,default=500)
extract.add_argument('-l','--maxReadInterval',metavar='max. read interval',dest='maxReadIntervalLength',help='maximum read interval length (default: 10000)',type=int,default=10000)
extract.add_argument('--primary',dest='primary',help='flag to use only primary positions of multimapping reads',action='store_true')
extract.add_argument('-c','--cores',dest='cores',metavar='cpus',help='Number of cores to use for alignment parsing (default: 5)',default=5,type=int)
extract.add_argument('-f','--chrom',metavar='chromosomes list',dest='chromFile',help='Extract crosslink sites only from chromosomes given in this file (one chromosome per line, default: None)',type=str,default=None)
extract.add_argument('-t','--tmp',dest='tmp',metavar='tmp',help='Path to create and store temp files (default behavior: use folder from "--output" parameter)',default=None,type=str)
extract.add_argument('-v','--verbose',metavar='Verbose level',dest='log',help='Allowed choices: '+', '.join(loglevels)+' (default: info)',choices=loglevels,default='info')
''' ____________________ [Counting] ___________________ '''
# count
chelp = 'count: counts the number of crosslink/deletion/insertion sites'
count = subps.add_parser('count',description=chelp,formatter_class=argparse.RawTextHelpFormatter) #help='count crosslinks',
count.add_argument('-i','--input',metavar='input bed',dest='input',help='extracted crosslink, insertion or deletion sites (.bed[.gz]), see "{} extract -h"'.format(prog),required=True)
count.add_argument('-o','--output',metavar = 'output file',dest='output',help='output count file (.txt[.gz], default: print to console)',default=None,type=str)
count.add_argument('-a','--ann',metavar = 'annotation',dest='annotation',help='''flattened annotation file (.bed[.gz])
See "{0} annotation -h" OR sliding window annotation file (.bed[.gz]), see "{0} createSlidingWindows -h"'''.format(prog),required=True)
count.add_argument('--unstranded',dest='unstranded', help='''crosslink site counting is strand specific by default.
Use this flag for non strand specific crosslink site counting''',action='store_true')
count.add_argument('--copy_tmp',dest='cpTmp', help='''In certain cases, gzip crashes on while running "htseq-clip count" with a combination of Slurm and Snakemake.
Copying files to the local temp. folder seems to get rid of the issue. Use this flag to copy files to a tmp. folder.
Default: use system specific "tmp" folder, use argument "--tmp" to specify a custom one''',action='store_true')
count.add_argument('-t','--tmp',metavar = 'temp. directory',dest='tmp',help='temp. directory path to copy files (default: None, use system tmp directory)',default=None,type=str)
count.add_argument('-v','--verbose',metavar='Verbose level',dest='log',help='Allowed choices: '+', '.join(loglevels)+' (default: info)',choices=loglevels,default='info')
''' ____________________ [Helpers] ___________________ '''
# createMatrix
cmhelp = 'createMatrix: create R friendly output matrix file from count function output files'
createMatrix = subps.add_parser('createMatrix',description=cmhelp,formatter_class = argparse.RawTextHelpFormatter)
createMatrix.add_argument('-i','--inputFolder', dest='input', metavar = 'input folder', help='Folder name with output files from count function, see "{} count -h ", supports .gz (gzipped files)'.format(prog), required = True)
createMatrix.add_argument('-b','--prefix', dest='prefix', metavar = 'file name prefix', help='Use files only with this given file name prefix (default: None)', default="", type=str)
createMatrix.add_argument('-e','--postfix', dest='postfix', metavar = 'file name postfix', help='Use files only with this given file name postfix (default: None). WARNING! either "--prefix" or "--postfix" argument must be given!', default="", type=str)
createMatrix.add_argument('-o','--output',metavar = 'output file',dest='output',help='output junction file (.txt[.gz], default: print to console)',default=None,type=str)
createMatrix.add_argument('-v','--verbose',metavar='Verbose level',dest='log',help='Allowed choices: '+', '.join(loglevels)+' (default: info)',choices=loglevels,default='info')
# Now read in arguments and process
try:
args = parser.parse_args()
if args.subparser is None:
parser.print_help(sys.stderr)
sys.exit(1)
# set logging level and handler
if args.log== 'quiet':
logger.addHandler(logging.NullHandler())
else:
logger.setLevel(logging.getLevelName(args.log.upper()))
if len(logger.handlers)>=1:
# ugly fix for multiple logging handlers
logger.handlers = []
consHandle = logging.StreamHandler(sys.stderr)
consHandle.setLevel(logging.getLevelName(args.log.upper()))
consHandle.setFormatter(logging.Formatter(' [%(levelname)s] %(message)s'))
logger.addHandler(consHandle)
logging.info('run started at {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M')))
| |
    # over-write because img_metas are needed as inputs for bbox_head.
def forward_train(self,
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_keypoints=None,
gt_areas=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""Forward function for training mode.
Args:
x (list[Tensor]): Features from backbone.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_keypoints (Tensor): Ground truth keypoints of the image,
shape (num_gts, K*3).
gt_areas (Tensor): Ground truth mask areas of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
proposal_cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert proposal_cfg is None, '"proposal_cfg" must be None'
outs = self(x, img_metas)
memory, mlvl_masks = outs[-2:]
outs = outs[:-2]
if gt_labels is None:
loss_inputs = outs + (gt_bboxes, gt_keypoints, gt_areas, img_metas)
else:
loss_inputs = outs + (gt_bboxes, gt_labels, gt_keypoints, gt_areas,
img_metas)
losses_and_targets = self.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses, refine_targets = losses_and_targets
# get pose refinement loss
losses = self.forward_refine(memory, mlvl_masks, refine_targets,
losses, img_metas)
return losses
    @force_fp32(apply_to=('all_cls_scores', 'all_kpt_preds'))
    def loss(self,
             all_cls_scores,
             all_kpt_preds,
             enc_cls_scores,
             enc_kpt_preds,
             enc_hm_proto,
             gt_bboxes_list,
             gt_labels_list,
             gt_keypoints_list,
             gt_areas_list,
             img_metas,
             gt_bboxes_ignore=None):
        """Loss function.

        Args:
            all_cls_scores (Tensor): Classification score of all
                decoder layers, has shape
                [nb_dec, bs, num_query, cls_out_channels].
            all_kpt_preds (Tensor): Sigmoid regression
                outputs of all decode layers. Each is a 4D-tensor with
                normalized coordinate format (x_{i}, y_{i}) and shape
                [nb_dec, bs, num_query, K*2].
            enc_cls_scores (Tensor): Classification scores of
                points on encode feature map, has shape
                (N, h*w, num_classes). Only be passed when as_two_stage is
                True, otherwise is None.
            enc_kpt_preds (Tensor): Regression results of each points
                on the encode feature map, has shape (N, h*w, K*2). Only be
                passed when as_two_stage is True, otherwise is None.
            enc_hm_proto (tuple): Heatmap prediction and its mask from the
                encoder; unpacked below as ``(hm_pred, hm_mask)``.
            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels_list (list[Tensor]): Ground truth class indices for each
                image with shape (num_gts, ).
            gt_keypoints_list (list[Tensor]): Ground truth keypoints for each
                image with shape (num_gts, K*3) in [p^{1}_x, p^{1}_y, p^{1}_v,
                ..., p^{K}_x, p^{K}_y, p^{K}_v] format.
            gt_areas_list (list[Tensor]): Ground truth mask areas for each
                image with shape (num_gts, ).
            img_metas (list[dict]): List of image meta information.
            gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
                which can be ignored for each image. Default None.

        Returns:
            tuple:
                - dict[str, Tensor]: A dictionary of loss components.
                - tuple: last decoder layer's keypoint predictions, targets,
                  area targets and weights, forwarded to pose refinement.
        """
        assert gt_bboxes_ignore is None, \
            f'{self.__class__.__name__} only supports ' \
            f'for gt_bboxes_ignore setting to None.'
        num_dec_layers = len(all_cls_scores)
        # Replicate the GT lists once per decoder layer so every layer is
        # supervised independently through multi_apply.
        all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
        all_gt_keypoints_list = [
            gt_keypoints_list for _ in range(num_dec_layers)
        ]
        all_gt_areas_list = [gt_areas_list for _ in range(num_dec_layers)]
        img_metas_list = [img_metas for _ in range(num_dec_layers)]
        losses_cls, losses_kpt, losses_oks, kpt_preds_list, kpt_targets_list, \
            area_targets_list, kpt_weights_list = multi_apply(
                self.loss_single, all_cls_scores, all_kpt_preds,
                all_gt_labels_list, all_gt_keypoints_list,
                all_gt_areas_list, img_metas_list)
        loss_dict = dict()
        # loss of proposal generated from encode feature map.
        if enc_cls_scores is not None:
            # Encoder proposals are class-agnostic: every GT gets label 0.
            binary_labels_list = [
                torch.zeros_like(gt_labels_list[i])
                for i in range(len(img_metas))
            ]
            enc_loss_cls, enc_losses_kpt = \
                self.loss_single_rpn(
                    enc_cls_scores, enc_kpt_preds, binary_labels_list,
                    gt_keypoints_list, gt_areas_list, img_metas)
            loss_dict['enc_loss_cls'] = enc_loss_cls
            loss_dict['enc_loss_kpt'] = enc_losses_kpt
        # loss from the last decoder layer
        loss_dict['loss_cls'] = losses_cls[-1]
        loss_dict['loss_kpt'] = losses_kpt[-1]
        loss_dict['loss_oks'] = losses_oks[-1]
        # loss from other decoder layers (keyed d0., d1., ... per layer)
        num_dec_layer = 0
        for loss_cls_i, loss_kpt_i, loss_oks_i in zip(
                losses_cls[:-1], losses_kpt[:-1], losses_oks[:-1]):
            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
            loss_dict[f'd{num_dec_layer}.loss_kpt'] = loss_kpt_i
            loss_dict[f'd{num_dec_layer}.loss_oks'] = loss_oks_i
            num_dec_layer += 1
        # losses of heatmap generated from P3 feature map
        hm_pred, hm_mask = enc_hm_proto
        loss_hm = self.loss_heatmap(hm_pred, hm_mask, gt_keypoints_list,
                                    gt_labels_list, gt_bboxes_list)
        loss_dict['loss_hm'] = loss_hm
        # Also hand back the last decoder layer's predictions/targets so the
        # refinement stage can reuse them without recomputing assignments.
        return loss_dict, (kpt_preds_list[-1], kpt_targets_list[-1],
                           area_targets_list[-1], kpt_weights_list[-1])
    def loss_heatmap(self, hm_pred, hm_mask, gt_keypoints, gt_labels,
                     gt_bboxes):
        """Compute the auxiliary keypoint-heatmap loss.

        Gaussian targets are drawn at each visible GT keypoint on a grid
        downsampled by a factor of 8, then compared against the predicted
        heatmap via ``weighted_neg_loss``.

        Args:
            hm_pred (Tensor): Predicted heatmap logits, shape
                (num_img, K, h, w). NOTE: sigmoid is applied in place below.
            hm_mask (Tensor): Mask whose last two dims match (h, w).
            gt_keypoints (list[Tensor]): Per-image keypoints (num_gts, K*3)
                in (x, y, visibility) triplets.
            gt_labels (list[Tensor]): Per-image GT class indices.
            gt_bboxes (list[Tensor]): Per-image GT boxes (num_gts, 4).
                NOTE(review): divided by 8 in place below (no clone), so the
                caller's tensors are mutated — confirm callers don't reuse.

        Returns:
            Tensor: heatmap loss scaled by ``self.loss_hm_weight``.
        """
        assert hm_pred.shape[-2:] == hm_mask.shape[-2:]
        num_img, _, h, w = hm_pred.size()
        # placeholder of heatmap target (Gaussian distribution)
        hm_target = hm_pred.new_zeros(hm_pred.shape)
        for i, (gt_label, gt_bbox, gt_keypoint) in enumerate(
                zip(gt_labels, gt_bboxes, gt_keypoints)):
            if gt_label.size(0) == 0:
                continue
            # Clone keypoints before mapping them to the 1/8-resolution grid.
            gt_keypoint = gt_keypoint.reshape(gt_keypoint.shape[0], -1,
                                              3).clone()
            gt_keypoint[..., :2] /= 8
            assert gt_keypoint[..., 0].max() <= w  # new coordinate system
            assert gt_keypoint[..., 1].max() <= h  # new coordinate system
            gt_bbox /= 8
            gt_w = gt_bbox[:, 2] - gt_bbox[:, 0]
            gt_h = gt_bbox[:, 3] - gt_bbox[:, 1]
            for j in range(gt_label.size(0)):
                # get heatmap radius from the box size, clamped to [0, 3]
                kp_radius = torch.clamp(
                    torch.floor(
                        gaussian_radius((gt_h[j], gt_w[j]), min_overlap=0.9)),
                    min=0,
                    max=3)
                for k in range(self.num_keypoints):
                    # only draw keypoints marked visible (v > 0)
                    if gt_keypoint[j, k, 2] > 0:
                        gt_kp = gt_keypoint[j, k, :2]
                        gt_kp_int = torch.floor(gt_kp)
                        draw_umich_gaussian(hm_target[i, k], gt_kp_int,
                                            kp_radius)
        # compute heatmap loss; clamp keeps log terms finite
        hm_pred = torch.clamp(
            hm_pred.sigmoid_(), min=1e-4, max=1 - 1e-4)  # refer to CenterNet
        loss_hm = weighted_neg_loss(hm_pred, hm_target, hm_mask.unsqueeze(1))
        return loss_hm * self.loss_hm_weight
    def loss_single(self,
                    cls_scores,
                    kpt_preds,
                    gt_labels_list,
                    gt_keypoints_list,
                    gt_areas_list,
                    img_metas):
        """Loss function for outputs from a single decoder layer of a single
        feature level.

        Args:
            cls_scores (Tensor): Box score logits from a single decoder layer
                for all images. Shape [bs, num_query, cls_out_channels].
            kpt_preds (Tensor): Sigmoid outputs from a single decoder layer
                for all images, with normalized coordinate (x_{i}, y_{i}) and
                shape [bs, num_query, K*2].
            gt_labels_list (list[Tensor]): Ground truth class indices for each
                image with shape (num_gts, ).
            gt_keypoints_list (list[Tensor]): Ground truth keypoints for each
                image with shape (num_gts, K*3) in [p^{1}_x, p^{1}_y, p^{1}_v,
                ..., p^{K}_x, p^{K}_y, p^{K}_v] format.
            gt_areas_list (list[Tensor]): Ground truth mask areas for each
                image with shape (num_gts, ).
            img_metas (list[dict]): List of image meta information.

        Returns:
            tuple: classification, keypoint-regression and OKS losses for
            this layer, followed by the flattened keypoint predictions,
            targets, area targets and keypoint weights (the caller forwards
            the last layer's set to pose refinement).
        """
        num_imgs = cls_scores.size(0)
        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
        kpt_preds_list = [kpt_preds[i] for i in range(num_imgs)]
        cls_reg_targets = self.get_targets(cls_scores_list, kpt_preds_list,
                                           gt_labels_list, gt_keypoints_list,
                                           gt_areas_list, img_metas)
        (labels_list, label_weights_list, kpt_targets_list, kpt_weights_list,
         area_targets_list, num_total_pos, num_total_neg) = cls_reg_targets
        # Flatten per-image targets into (bs*num_query, ...) tensors.
        labels = torch.cat(labels_list, 0)
        label_weights = torch.cat(label_weights_list, 0)
        kpt_targets = torch.cat(kpt_targets_list, 0)
        kpt_weights = torch.cat(kpt_weights_list, 0)
        area_targets = torch.cat(area_targets_list, 0)
        # classification loss
        cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
        # construct weighted avg_factor to match with the official DETR repo
        cls_avg_factor = num_total_pos * 1.0 + \
            num_total_neg * self.bg_cls_weight
        if self.sync_cls_avg_factor:
            cls_avg_factor = reduce_mean(
                cls_scores.new_tensor([cls_avg_factor]))
        cls_avg_factor = max(cls_avg_factor, 1)
        loss_cls = self.loss_cls(
            cls_scores, labels, label_weights, avg_factor=cls_avg_factor)
        # Compute the average number of gt boxes accross all gpus, for
        # normalization purposes
        num_total_pos = loss_cls.new_tensor([num_total_pos])
        num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()
        # construct factors used for rescale bboxes: per-query (w, h, w, h)
        factors = []
        for img_meta, kpt_pred in zip(img_metas, kpt_preds):
            img_h, img_w, _ = img_meta['img_shape']
            factor = kpt_pred.new_tensor([img_w, img_h, img_w,
                                          img_h]).unsqueeze(0).repeat(
                                              kpt_pred.size(0), 1)
            factors.append(factor)
        factors = torch.cat(factors, 0)
        # keypoint regression loss (computed on normalized coordinates)
        kpt_preds = kpt_preds.reshape(-1, kpt_preds.shape[-1])
        num_valid_kpt = torch.clamp(
            reduce_mean(kpt_weights.sum()), min=1).item()
        # assert num_valid_kpt == (kpt_targets>0).sum().item()
        loss_kpt = self.loss_kpt(
            kpt_preds, kpt_targets, kpt_weights, avg_factor=num_valid_kpt)
        # keypoint oks loss
        # A query is positive if any of its keypoint coords is supervised.
        pos_inds = kpt_weights.sum(-1) > 0
        # (img_w, img_h) pairs map normalized coordinates back to pixels.
        factors = factors[pos_inds][:, :2].repeat(1, kpt_preds.shape[-1] // 2)
        pos_kpt_preds = kpt_preds[pos_inds] * factors
        pos_kpt_targets = kpt_targets[pos_inds] * factors
        pos_areas = area_targets[pos_inds]
        pos_valid = kpt_weights[pos_inds, 0::2]
        if len(pos_areas) == 0:
            # No positives: zero loss that keeps the autograd graph connected.
            loss_oks = pos_kpt_preds.sum() * 0
        else:
            assert (pos_areas > 0).all()
            loss_oks = self.loss_oks(
                pos_kpt_preds,
                pos_kpt_targets,
                pos_valid,
                pos_areas,
                avg_factor=num_total_pos)
        return loss_cls, loss_kpt, loss_oks, kpt_preds, kpt_targets, \
            area_targets, kpt_weights
def get_targets(self,
cls_scores_list,
kpt_preds_list,
gt_labels_list,
gt_keypoints_list,
gt_areas_list,
img_metas):
""""Compute regression and classification targets for a batch image.
Outputs from a single decoder layer of a single feature level are used.
Args:
cls_scores_list (list[Tensor]): Box score logits from a single
decoder layer for each image with shape [num_query,
cls_out_channels].
kpt_preds_list (list[Tensor]): Sigmoid outputs from a single
decoder layer for each image, with normalized coordinate
(x_{i}, y_{i}) and shape [num_query, K*2].
gt_labels_list (list[Tensor]): Ground truth class indices for each
image with shape (num_gts, ).
gt_keypoints_list (list[Tensor]): Ground truth keypoints for each
image with shape (num_gts, K*3).
gt_areas_list (list[Tensor]): Ground truth mask areas for each
image with shape (num_gts, ).
img_metas (list[dict]): List of image meta information.
Returns:
tuple: a tuple containing the following targets.
- labels_list (list[Tensor]): Labels for all images.
- label_weights_list (list[Tensor]): Label weights for all
images.
- kpt_targets_list (list[Tensor]): Kpt targets for all
images.
- kpt_weights_list (list[Tensor]): Kpt weights for all
images.
- area_targets_list (list[Tensor]): area targets for all
images.
- num_total_pos (int): Number of positive samples in all
images.
- num_total_neg (int): Number of negative samples in all
images.
"""
(labels_list, label_weights_list, | |
price=price['Caustics'])
# =============================================================================
# Wastewater treatment units
# =============================================================================
# Mix waste liquids for treatment
M501 = bst.units.Mixer('M501', ins=(
S403-1,
# AC401-0,
# S402-1,
F401-0,
# r_S402_s-1, r_S403_s-1, r_S404_s-1,
# X401-1, S408-0,
))
# This represents the total cost of wastewater treatment system
WWT_cost = units.WastewaterSystemCost('WWTcost501', ins=M501-0)
R501 = units.AnaerobicDigestion('R501', ins=WWT_cost-0,
outs=('biogas', 'anaerobic_treated_water',
'anaerobic_sludge'),
reactants=soluble_organics + ['TAL'],
split=find_split(splits_df.index,
splits_df['stream_611'],
splits_df['stream_612'],
chemical_groups),
T=35+273.15)
    # Dry feedstock flow in US ton/day: (kg/hr dry mass) * 24 h / 907.185 kg-per-ton
    get_flow_tpd = lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/907.185
    # Mix recycled stream and wastewater after R501
    M502 = bst.units.Mixer('M502', ins=(R501-1, ''))
    @M502.add_specification(run=True)
    def M502_spec():
        # Run the downstream recycle mixer M503 first so M502's second inlet
        # (fed by M503's outlet, see M503 definition) is up to date.
        M503._run()
    # ratio presumably scales against a 2205 ton/day design basis — TODO confirm
    R502 = units.AerobicDigestion('R502', ins=(M502-0, air_lagoon, aerobic_caustic),
                                  outs=('aerobic_vent', 'aerobic_treated_water'),
                                  reactants=soluble_organics,
                                  ratio=get_flow_tpd()/2205)
# Membrane bioreactor to split treated wastewater from R502
S501 = bst.units.Splitter('S501', ins=R502-1, outs=('membrane_treated_water',
'membrane_sludge'),
split=find_split(splits_df.index,
splits_df['stream_624'],
splits_df['stream_625'],
chemical_groups))
S501.line = 'Membrane bioreactor'
# Recycled sludge stream of memberane bioreactor, the majority of it (96%)
# goes to aerobic digestion and the rest to sludge holding tank then to BT
S502 = bst.units.Splitter('S502', ins=S501-1, outs=('to_aerobic_digestion',
'to_boiler_turbogenerator'),
split=0.96)
M503 = bst.units.Mixer('M503', ins=(S502-0, 'centrate'), outs=1-M502)
# Mix anaerobic and 4% of membrane bioreactor sludge
M504 = bst.units.Mixer('M504', ins=(R501-2, S502-1))
# Sludge centrifuge to separate water (centrate) from sludge
S503 = bst.units.Splitter('S503', ins=M504-0, outs=(1-M503, 'sludge'),
split=find_split(splits_df.index,
splits_df['stream_616'],
splits_df['stream_623'],
chemical_groups))
S503.line = 'Sludge centrifuge'
# Reverse osmosis to treat membrane separated water
S504 = bst.units.Splitter('S504', ins=S501-0, outs=('discharged_water', 'waste_brine'),
split=find_split(splits_df.index,
splits_df['stream_626'],
splits_df['stream_627'],
chemical_groups))
S504.line = 'Reverse osmosis'
# Mix solid wastes to boiler turbogenerator
M505 = bst.units.Mixer('M505', ins=(S503-1,
S401-0,
# F401-0, D401-0,
),
outs='wastes_to_boiler_turbogenerator')
# %%
# =============================================================================
# Facilities streams
# =============================================================================

# Fresh makeup chemical streams; prices come from the shared `price` table.
ethanol_fresh = Stream('ethanol_fresh', price=price['Ethanol'])
sulfuric_acid_fresh = Stream('sulfuric_acid_fresh', price=price['Sulfuric acid'])
# TCP_fresh = Stream('TCP_fresh', price=price['TCP'])
ammonia_fresh = Stream('ammonia_fresh', price=price['AmmoniumHydroxide'])
CSL_fresh = Stream('CSL_fresh', price=price['CSL'])
# lime_fresh = Stream('lime_fresh', price=price['Lime'])
HCl_fresh = Stream('HCl_fresh', price=price['HCl'])
# NOTE(review): octanol is priced using the 'Hexanol' entry — confirm intended.
octanol_fresh = Stream('octanol_fresh', price=price['Hexanol'])
H2_fresh = Stream('H2_fresh', price=price['Hydrogen'])
# heptane_fresh = Stream('heptane_fresh', price=price['Heptane'])
# toluene_fresh = Stream('toluene_fresh', price=price['Toluene'])
# hexanol_fresh_s = Stream('hexanol_fresh_s', price=price['Hexanol'])
heptane_fresh_s = Stream('heptane_fresh_s', price=price['Heptane'])
toluene_fresh_s = Stream('toluene_fresh_s', price=price['Toluene'])
hydrogen_fresh = Stream('hydrogen_fresh', price=price['Hydrogen'])
KOH_fresh = Stream('KOH_fresh', price=price['KOH'])

# S401_out1_F_mass = S401.outs[1].F_mass
# if not (S401_out1_F_mass == 0):
#     ethanol_fresh = Stream('ethanol_fresh', Ethanol = 0.24 * S401_out1_F_mass, units='kg/hr', price=price['Ethanol']) - M401.ins[3].imass['Ethanol']
#     DPHP_fresh = Stream('DPHP_fresh', DPHP = 0.25 * S401_out1_F_mass, units='kg/hr', price=price['DPHP']) - M401.ins[3].imass['Dipotassium hydrogen phosphate']
# else:
#     ethanol_fresh = Stream('ethanol_fresh', Ethanol = get_feedstock_dry_mass()*48*22.1/1000*0.93, units='kg/hr', price=price['Ethanol'])
#     DPHP_fresh = Stream('DPHP_fresh', DPHP = get_feedstock_dry_mass()*50*22.1/1000*0.93, units='kg/hr', price=price['DPHP'])

# Water used to keep system water usage balanced
system_makeup_water = Stream('system_makeup_water', price=price['Makeup water'])

# TAL stream
# TAL = Stream('TAL', units='kg/hr', price=price['TAL'])
# SA product (mixed esters sold at the SA price)
Mixed_esters = Stream('Mixed_esters', units='kg/hr', price=price['SA'])
# Acetoin product
# Acetoin = Stream('Acetoin', units='kg/hr', price=price['Acetoin'])
# # Isobutyraldehyde product
# IBA = Stream('IBA', units='kg/hr', price=price['IBA'])

# Chemicals used/generated in BT
# FGD_lime = Stream('FGD_lime')
ash = Stream('ash', price=price['Ash disposal'])
# boiler_chems = Stream('boiler_chems', price=price['Boiler chems'])
# baghouse_bag = Stream('baghouse_bag', price=price['Baghouse bag'])
# Supplementary natural gas for BT if produced steam not enough for regenerating
# all steam streams required by the system
# natural_gas = Stream('natural_gas', price=price['Natural gas'])

# Cooling tower chemicals
cooling_tower_chems = Stream('cooling_tower_chems', price=price['Cooling tower chems'])

# 145 based on equipment M-910 (clean-in-place system) in Humbird et al.,
# scaled from the 2205 ton/day base case to the current plant size.
CIP_chems_in = Stream('CIP_chems_in', Water=145*get_flow_tpd()/2205, units='kg/hr')

# 1372608 based on stream 950 in Humbird et al.
# Air needed for multiple processes (including enzyme production that was not included here),
# not rigorously modeled, only scaled based on plant size
plant_air_in = Stream('plant_air_in', phase='g', units='kg/hr',
                      N2=0.79*1372608*get_flow_tpd()/2205,
                      O2=0.21*1372608*get_flow_tpd()/2205)

# 8021 based on stream 713 in Humbird et al.
fire_water_in = Stream('fire_water_in',
                       Water=8021*get_flow_tpd()/2205, units='kg/hr')
# =============================================================================
# Facilities units
# =============================================================================

# Octanol storage + transfer pump feeding the esterification step.
T601 = bst.units.StorageTank('T601', ins=octanol_fresh,
                             # outs=Octanol_esterification,
                             )
T601.line = 'Octanol storage tank'
T601_P = units.TALPump('T601_P', ins=T601-0, outs = Octanol_esterification)

# Hydrogen storage + transfer pump feeding hydrogenation.
T602 = bst.units.StorageTank('T602', ins=H2_fresh,
                             # outs=H2_esterification,
                             )
T602.line = 'H2 storage tank'
T602_P = units.TALPump('T602_P', ins=T602-0, outs = H2_hydrogenation)

# S601 = bst.units.ReversedSplitter('S601', ins=T601-0,
#                                   outs=(pretreatment_sulfuric_acid,
#                                         ''))
# T608 = units.TCPStorageTank('T608', ins=TCP_fresh,
#                             outs='TCP_catalyst')
# T608-0-3-R401
# T608.line = 'Tricalcium diphosphate storage tank'
#
# T602 = units.AmmoniaStorageTank('T602', ins=ammonia_fresh, outs=ammonia_M205)
# T602.line = 'Ammonia storage tank'

T603 = units.CSLstorageTank('T603', ins=CSL_fresh, outs=CSL)
T603.line = 'CSL storage tank'

# Ethanol storage
T604 = bst.units.StorageTank('T604', ins=ethanol_fresh)
T604.line = 'Ethanol storage tank'
T604_P = units.TALPump('T604_P', ins=T604-0, outs = Ethanol_desorption)
# T604_P = bst.units.ConveyingBelt('T604_P', ins=T604-0, outs = Hexanol)
# T607_P = units.TALPump('T607_P', ins=T607-0, outs = Hydrogen)

# Connections to ATPE Mixer
# T604_P-0-1-M401
# T605_P-0-2-M401

# Product (mixed esters) storage.
# 7-day storage time, similar to ethanol's in Humbird et al.
T620 = units.TALStorageTank('T620', ins=F401-1, tau=7*24, V_wf=0.9,
                            vessel_type='Floating roof',
                            vessel_material='Stainless steel')
T620.line = 'EstersStorageTank'
T620_P = units.TALPump('T620_P', ins=T620-0, outs=Mixed_esters)

# # 7-day storage time, similar to ethanol's in Humbird et al.
# T607 = units.TALStorageTank('T607', ins=D402_H-0, tau=7*24, V_wf=0.9,
#                             vessel_type='Floating roof',
#                             vessel_material='Stainless steel')
# T607.line = 'AcetoinStorageTank'
# T607_P = units.TALPump('T607_P', ins=T607-0, outs=Acetoin)
# # 7-day storage time, similar to ethanol's in Humbird et al.
# T608 = units.TALStorageTank('T608', ins=D403_H-0, tau=7*24, V_wf=0.9,
#                             vessel_type='Floating roof',
#                             vessel_material='Stainless steel')
# T608.line = 'IBAStorageTank'
# T608_P = units.TALPump('T608_P', ins=T608-0, outs=IBA)

# Clean-in-place, air distribution, and fire-water facilities.
CIP = facilities.CIP('CIP901', ins=CIP_chems_in, outs='CIP_chems_out')
ADP = facilities.ADP('ADP902', ins=plant_air_in, outs='plant_air_out',
                     ratio=get_flow_tpd()/2205)
FWT = units.FireWaterTank('FWT903', ins=fire_water_in, outs='fire_water_out')

# Chilled-water package.
CWP = facilities.CWP('CWP802', ins='return_chilled_water',
                     outs='process_chilled_water')

# M505-0 is the liquid/solid mixture, R501-0 is the biogas, blowdown is discharged
# BT = facilities.BT('BT', ins=(M505-0, R501-0,
#                               FGD_lime, boiler_chems,
#                               baghouse_bag, natural_gas,
#                               'BT_makeup_water'),
#                    B_eff=0.8, TG_eff=0.85,
#                    combustibles=combustibles,
#                    side_streams_to_heat=(water_M201, water_M202, steam_M203),
#                    outs=('gas_emission', ash, 'boiler_blowdown_water'))

# Boiler/turbogenerator burning the solid wastes (M505-0) and biogas (R501-0).
BT = bst.facilities.BoilerTurbogenerator('BT701',
                                         ins=(M505-0,
                                              R501-0,
                                              'boiler_makeup_water',
                                              'natural_gas',
                                              'lime',
                                              'boilerchems'),
                                         outs=('gas_emission', 'boiler_blowdown_water', ash,),
                                         turbogenerator_efficiency=0.85,
                                         natural_gas_price=price['Natural gas'])

# BT = bst.BDunits.BoilerTurbogenerator('BT',
#                                       ins=(M505-0, R501-0, 'boiler_makeup_water', 'natural_gas', FGD_lime, boiler_chems),
#                                       boiler_efficiency=0.80,
#                                       turbogenerator_efficiency=0.85)

# Blowdown is discharged
CT = facilities.CT('CT801', ins=('return_cooling_water', cooling_tower_chems,
                                 'CT_makeup_water'),
                   outs=('process_cooling_water', 'cooling_tower_blowdown'))

# All water used in the system, here only consider water usage,
# if heating needed, then heating duty required is considered in BT
process_water_streams = (enzyme_water,
                         aerobic_caustic,
                         CIP.ins[-1], BT.ins[-1], CT.ins[-1])

PWC = facilities.PWC('PWC904', ins=(system_makeup_water, S504-0),
                     process_water_streams=process_water_streams,
                     recycled_blowdown_streams=None,
                     outs=('process_water', 'discharged_water'))

# Heat exchange network
HXN = bst.facilities.HeatExchangerNetwork('HXN1001',
                                          ignored=lambda: [
                                              # H401,
                                              # H402,
                                              # H403,
                                              # H404,
                                              # AC401.heat_exchanger_drying,
                                              # AC401.heat_exchanger_regeneration,
                                              # F401.components['condenser'],
                                          ],
                                          cache_network=True,
                                          force_ideal_thermo=True,
                                          )
# HXN = HX_Network('HXN')

# %%
# =============================================================================
# Complete system
# =============================================================================

TAL_sys = create_TAL_sys()

# Shortcuts into the main flowsheet registries.
f = bst.main_flowsheet
u = f.unit
s = f.stream

feedstock = s.feedstock
Mixed_esters = s.Mixed_esters

# Dry feedstock flow in US ton/day: kg/hr * 24 h/day / 907.185 kg per US ton.
get_flow_tpd = lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/907.185

# Feed and product streams that carry a price and therefore enter the TEA.
# (The original built each set from two *identical* list comprehensions
# unioned together; a single set comprehension yields the same set.)
TEA_feeds = {i for i in TAL_sys.feeds if i.price}
TEA_products = {i for i in TAL_sys.products if i.price} | {Mixed_esters}

# Expose every unit by its ID at module level (e.g. R501, S504, BT701).
for ui in u:
    globals()[ui.ID] = ui
# %%
# =============================================================================
# TEA
# =============================================================================

# Legacy CellulosicEthanolTEA configuration, kept for reference:
# TAL_tea = CellulosicEthanolTEA(system=TAL_sys, IRR=0.10, duration=(2016, 2046),
#         depreciation='MACRS7', income_tax=0.21, operating_days=0.9*365,
#         lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
#         startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
#         startup_VOCfrac=0.75, WC_over_FCI=0.05,
#         finance_interest=0.08, finance_years=10, finance_fraction=0.4,
#         # biosteam Splitters and Mixers have no cost,
#         # cost of all wastewater treatment units are included in WWT_cost,
#         # BT is not included in this TEA
#         OSBL_units=(u.U101, u.WWT_cost,
#                     u.T601, u.T602, u.T603, u.T606, u.T606_P,
#                     u.CWP, u.CT, u.PWC, u.CIP, u.ADP, u.FWT, u.BT),
#         warehouse=0.04, site_development=0.09, additional_piping=0.045,
#         proratable_costs=0.10, field_expenses=0.10, construction=0.20,
#         contingency=0.10, other_indirect_costs=0.10,
#         labor_cost=3212962*get_flow_tpd()/2205,
#         labor_burden=0.90, property_insurance=0.007, maintenance=0.03,
#         steam_power_depreciation='MACRS20', boiler_turbogenerator=u.BT)
# TAL_no_BT_tea = TAL_tea

# Techno-economic analysis: 30-y duration, 10% IRR, 21% income tax, 90% uptime;
# labor cost scaled with plant size from the 2205 ton/day base case.
TAL_tea = TALTEA(system=TAL_sys, IRR=0.10, duration=(2016, 2046),
                 depreciation='MACRS7', income_tax=0.21, operating_days=0.9*365,
                 lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
                 startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
                 startup_VOCfrac=0.75, WC_over_FCI=0.05,
                 finance_interest=0.08, finance_years=10, finance_fraction=0.4,
                 # biosteam Splitters and Mixers have no cost,
                 # cost of all wastewater treatment units are included in WWT_cost,
                 # BT701 *is* listed here as OSBL and depreciated separately
                 # via steam_power_depreciation / boiler_turbogenerator below
                 OSBL_units=(u.U101, u.WWTcost501,
                             # u.T601, u.T602,
                             u.T603, u.T604, u.T620,
                             # u.T606, u.T606_P,
                             u.CWP802, u.CT801, u.PWC904, u.CIP901, u.ADP902, u.FWT903, u.BT701),
                 warehouse=0.04, site_development=0.09, additional_piping=0.045,
                 proratable_costs=0.10, field_expenses=0.10, construction=0.20,
                 contingency=0.10, other_indirect_costs=0.10,
                 labor_cost=3212962*get_flow_tpd()/2205,
                 labor_burden=0.90, property_insurance=0.007, maintenance=0.03,
                 steam_power_depreciation='MACRS20', boiler_turbogenerator=u.BT701)

# Alias kept for code that expects a separate "no BT" TEA object.
TAL_no_BT_tea = TAL_tea
# # Removed because | |
= "#a9f971"
#~ GR_color_fit = "#ed0dd9"
GR_color_fit = self._luminosity_color(GR_color_markers, 1.1)
plot_alpha = 0.6
#~ lets plot severEOSs on the up plot and eventually cut out data
for label, data, eos in zip( all_label, all_data, severalEOSs ):
#~ we set mimimal compactenss threshold and cut out all entries
#~ who are below it only if we are interested in stable solutions
if append_stable:
_min_x = list(
map(lambda _: _ >= min_compactness, data[0])
).index(True)
data[0] = [ _ for _ in data[0][_min_x:] ]
data[1] = [ _ for _ in data[1][_min_x:] ]
max_x = _get_max(data[0], max_x)
min_x = _get_min(data[0], min_x)
max_y = _get_max(data[1], max_y)
min_y = _get_min(data[1], min_y)
ax_up.plot(
data[0],
data[1],
label = None,
linewidth = 0,
markersize = 5.5,
markevery = self._get_markevry(data[0], data[1], amount_points=20),
**self._get_plot_keywords(
markers, colors, linestyles,
{
"name": eos["name"],
"m": eos["m"],
"lambda": eos["lambda"]
}
),
alpha = plot_alpha
)
#~ will use this foreach to fill the polyfit for GR
polyfit_res = []
xp = []
yp = []
#~ plot all GR data and gather all x and y for evaluating the polyfit
for label, data, eos in zip(
all_label_GR,
all_data_GR,
[ _ for _ in set( [ _["name"] for _ in severalEOSs ] ) ]
):
#~ we set mimimal compactenss threshold and cut out all entries
#~ who are below it only if we are interested in stable solutions
if append_stable:
_min_x = list(
map(lambda _: _ >= min_compactness, data[0])
).index(True)
data[0] = [ _ for _ in data[0][_min_x:] ]
data[1] = [ _ for _ in data[1][_min_x:] ]
max_x = _get_max(data[0], max_x)
min_x = _get_min(data[0], min_x)
max_y = _get_max(data[1], max_y)
min_y = _get_min(data[1], min_y)
ax_up.plot(
data[0],
data[1],
label = None,
linewidth = 0,
markersize = 5.5,
markevery = self._get_markevry(data[0], data[1], amount_points=20),
marker = markers.get(eos, None),
color = GR_color_markers,
markerfacecolor = GR_color_markers,
markeredgecolor = GR_color_markers,
alpha = plot_alpha
)
xp.append(data[0])
yp.append(data[1])
coef, chi_red, p = _get_polyfit_res(xp, yp)
max_y_down = 0
min_y_down = 1e9
#~ average over all EOSs of all the residuals
delta_all = 0
n_all = 0
#~ average over all EOSs of largest residual
delta_all_max = 0
n_all_max = 0
#~ the largest residual across all EOSs
delta_max = 0
#~ for the generated polyfit function calcualte the
#~ relative error and plot it donw for GR
for label, data, eos in zip(
all_label_GR,
all_data_GR,
[ _ for _ in set( [ _["name"] for _ in severalEOSs ] ) ]
):
_data = [
abs(1 - _/p(__)) for _,__ in zip(data[1], data[0])
]
delta_all += sum(_data)
n_all += len(_data)
delta_all_max += max(_data)
n_all_max += 1
delta_max = _get_max(_data, delta_max)
max_y_down = _get_max(_data, max_y_down)
min_y_down = _get_min(_data, min_y_down)
ax_down.plot(
data[0],
_data,
label = None,
linewidth = 0,
markersize = 5.5,
markevery = self._get_markevry(data[0], _data),
marker = markers.get(eos, None),
color = GR_color_markers,
markerfacecolor = GR_color_markers,
markeredgecolor = GR_color_markers,
alpha = plot_alpha
)
avg_L_1 = delta_all/n_all
avg_L_inf = delta_all_max/n_all_max
L_inf_worst = delta_max
print(
"\n GR fit"
"\n\t $\chi_r^2$ = {:.3e}"
"\n\t $a_0$ = {:.3e}"
"\n\t $a_1$ = {:.3e}"
"\n\t $a_4$ = {:.3e}"
"\n\t $< L_1 >$ = {:.3e}"
"\n\t $< L_\inf >$ = {:.3e}"
"\n\t $ L_\inf $ = {:.3e}\n".format(
chi_red,
coef[0],
coef[1],
coef[4],
avg_L_1,
avg_L_inf,
L_inf_worst
)
)
lines_polyfit = [
Line2D(
[0], [0],
color = GR_color_fit,
marker = None,
linestyle = "-",
linewidth = 1.5,
label = "GR fit"
)
]
p_x = np.linspace(min_x, max_x, 100)
p_y = [ p(_) for _ in p_x ]
#~ generate 100 points between min and max of x and plot it values
ax_up.plot(
p_x,
p_y,
label = None,
linewidth = 2.5,
linestyle = "-",
markersize = 0,
markevery = 0,
marker = None,
color = GR_color_fit,
zorder = 100
)
ax_up.fill_between(
p_x,
np.array(p_y)*(1 + avg_L_inf),
np.array(p_y)*(1 - avg_L_inf),
facecolor=GR_color_markers,
alpha= plot_alpha - 0.2
)
ax_up.fill_between(
p_x,
np.array(p_y)*( 1 + L_inf_worst ),
np.array(p_y)*( 1 - L_inf_worst ),
facecolor=GR_color_markers,
alpha= plot_alpha - 0.4
)
#~ now do the same for each color if there are more than 1
for c, l in itertools.product(colors.items(), linestyles.items()):
#~ the colors will have label key containing the name of parameter
#~ which they represetn
if c[0] == "label" or l[0] == "label":
continue
xp = []
yp = []
for data, eos in zip(all_data, severalEOSs):
#~ if the current eos has parameter value equal to the current one
#~ lets append its data
if eos[colors["label"]] == c[0] and eos[linestyles["label"]] == l[0]:
xp.append( data[0] )
yp.append( data[1] )
#~ colors and linestyles have all possible combinations of EOSs
#~ but we may not need all of them but only porsion determineed
#~ by severalEOSs, so a quick fix, if no points added we just continue
if not xp:
continue
#~ expand all the data into flat list to calculate the polyfit
coef, chi_red, p = _get_polyfit_res(xp, yp)
max_y_down = 0
min_y_down = 1e9
#~ average over all EOSs of all the residuals
delta_all = 0
n_all = 0
#~ average over all EOSs of largest residual
delta_all_max = 0
n_all_max = 0
#~ the largest residual across all EOSs
delta_max = 0
for data, eos in zip(all_data, severalEOSs):
#~ if the current eos has parameter value equal to the current one
#~ lets append its data
if eos[colors["label"]] == c[0] and eos[linestyles["label"]] == l[0]:
_data = [
abs(1 - _/p(__)) for _,__ in zip(data[1], data[0])
]
delta_all += sum(_data)
n_all += len(_data)
delta_all_max += max(_data)
n_all_max += 1
delta_max = _get_max(_data, delta_max)
max_y_down = _get_max(_data, max_y_down)
min_y_down = _get_min(_data, min_y_down)
ax_down.plot(
data[0],
_data,
label = None,
linewidth = 0,
markersize = 5.5,
markevery = self._get_markevry(data[0], _data),
marker = markers.get(eos["name"], None),
color = c[1],
markerfacecolor = c[1],
markeredgecolor = c[1],
alpha = plot_alpha - 0.1
)
avg_L_1 = delta_all/n_all
avg_L_inf = delta_all_max/n_all_max
L_inf_worst = delta_max
print(
"\n lambda = {:.3e}, m = {:.3e} fit"
"\n\t $\chi_r^2$ = {:.3e}"
"\n\t $a_0$ = {:.3e}"
"\n\t $a_1$ = {:.3e}"
"\n\t $a_4$ = {:.3e}"
"\n\t $< L_1 >$ = {:.3e}"
"\n\t $< L_\inf >$ = {:.3e}"
"\n\t $ L_\inf $ = {:.3e}\n".format(
l[0] if linestyles["label"] == "lambda" else c[0],
c[0] if colors["label"] == "m" else l[0],
chi_red,
coef[0],
coef[1],
coef[4],
avg_L_1,
avg_L_inf,
L_inf_worst
)
)
#~ lines_polyfit.append(
#~ Line2D(
#~ [0], [0],
#~ color = c[1],
#~ marker = None,
#~ linestyle = l[1],
#~ linewidth = 1.5,
#~ label = "$\\lambda$ = {:.3e},\n m = {:.3e} fit".format(
#~ l[0] if linestyles["label"] == "lambda" else c[0],
#~ c[0] if colors["label"] == "m" else l[0]
#~ )
#~ )
#~ )
p_x = np.linspace(min_x, max_x, 100)
p_y = [ p(_) for _ in p_x ]
ax_up.plot(
p_x,
p_y,
label = None,
linewidth = 2.5,
linestyle = l[1],
markersize = 0,
markevery = 0,
marker = None,
color = self._luminosity_color(c[1], 1.1),
zorder = 90
)
#~ ax_up.fill_between(
#~ p_x,
#~ np.array(p_y)*(1 + avg_L_inf),
#~ np.array(p_y)*(1 - avg_L_inf),
#~ facecolor=c[1],
#~ alpha= plot_alpha - 0.25
#~ )
#~ ax_up.fill_between(
#~ p_x,
#~ np.array(p_y)*( 1 + L_inf_worst ),
#~ np.array(p_y)*( 1 - L_inf_worst ),
#~ facecolor=c[1],
#~ alpha= plot_alpha - 0.5
#~ )
lines_markers, lines_colors, lines_linestyles = self._get_lines_MSs_Cs_LSs(
markers, colors, linestyles, severalEOSs
)
ax_up.add_artist( ax_up.legend(
handles = [
*lines_markers
],
loc="upper left",
fontsize=10,
handlelength=3,
numpoints=1,
fancybox=True,
markerscale = 1.5,
ncol = 3,
frameon = False,
mode = None
) )
ax_up.add_artist( ax_up.legend(
handles = [
*lines_colors, *lines_polyfit, *lines_linestyles
],
loc="lower right",
fontsize=10,
handlelength=3,
numpoints=1,
fancybox=True,
markerscale = 1.5,
ncol = 2,
frameon = False,
mode = None
) )
ax_up.set_xlim(min_x, max_x)
ax_up.set_ylim(min_y, max_y)
ax_down.set_ylim(1e-3, 1.5e0)
plt.savefig(
'uniTilde.eps', format="eps",
bbox_inches='tight',
dpi=1200,
pad_inches=0
)
plt.show()
return
def plot_severalEOSs_uniBarI(self, severalEOSs ):
"""
plot severalEOS unifersal Tilde I relationships
<severalEOSs> with dictionaries see get_severalEOS_data for the format
EXAMPLE INPUT
severalEOSs = [
{ "name": "SLy4", "beta": 0, "m": 0, "lambda": 0 },
{ "name": "APR4", "beta": 0, "m": 0, "lambda": 0 },
{ "name": "FPS", "beta": 0, "m": | |
row.
builder.insert_cell()
builder.write("Row 2, Cell 1 Content")
# Build the second cell.
builder.insert_cell()
builder.write("Row 2, Cell 2 Content.")
builder.end_row()
# Signal that we have finished building the table.
builder.end_table()
doc.save(ARTIFACTS_DIR + "WorkingWithTables.create_simple_table.docx")
#ExEnd:CreateSimpleTable
def test_formatted_table(self):
    #ExStart:FormattedTable
    document = aw.Document()
    b = aw.DocumentBuilder(document)

    table = b.start_table()
    b.insert_cell()

    # Table-wide settings only take effect once at least one row exists.
    table.left_indent = 20.0

    # Header row: minimum height, shaded background, centered bold Arial.
    b.row_format.height = 40.0
    b.row_format.height_rule = aw.HeightRule.AT_LEAST
    b.cell_format.shading.background_pattern_color = drawing.Color.from_argb(198, 217, 241)
    b.paragraph_format.alignment = aw.ParagraphAlignment.CENTER
    b.font.size = 16
    b.font.name = "Arial"
    b.font.bold = True

    b.cell_format.width = 100.0
    b.write("Header Row,\n Cell 1")

    # The width is inherited from the previous cell, so it is not set again.
    b.insert_cell()
    b.write("Header Row,\n Cell 2")

    b.insert_cell()
    b.cell_format.width = 200.0
    b.write("Header Row,\n Cell 3")
    b.end_row()

    # Body rows: white background, vertically centered, auto-sized height.
    b.cell_format.shading.background_pattern_color = drawing.Color.white
    b.cell_format.width = 100.0
    b.cell_format.vertical_alignment = aw.tables.CellVerticalAlignment.CENTER
    b.row_format.height = 30.0
    b.row_format.height_rule = aw.HeightRule.AUTO

    b.insert_cell()
    # Drop the header font formatting for the body.
    b.font.size = 12
    b.font.bold = False
    b.write("Row 1, Cell 1 Content")
    b.insert_cell()
    b.write("Row 1, Cell 2 Content")
    b.insert_cell()
    b.cell_format.width = 200.0
    b.write("Row 1, Cell 3 Content")
    b.end_row()

    b.insert_cell()
    b.cell_format.width = 100.0
    b.write("Row 2, Cell 1 Content")
    b.insert_cell()
    b.write("Row 2, Cell 2 Content")
    b.insert_cell()
    b.cell_format.width = 200.0
    b.write("Row 2, Cell 3 Content.")
    b.end_row()
    b.end_table()

    document.save(ARTIFACTS_DIR + "WorkingWithTables.formatted_table.docx")
    #ExEnd:FormattedTable
def test_nested_table(self):
    #ExStart:NestedTable
    document = aw.Document()
    b = aw.DocumentBuilder(document)

    outer_cell = b.insert_cell()
    b.writeln("Outer Table Cell 1")
    b.insert_cell()
    b.writeln("Outer Table Cell 2")
    # Ending the outer table here is what allows a fresh table to be started
    # inside one of its cells; otherwise the cells inserted below would keep
    # extending the outer table.
    b.end_table()

    # Position the builder inside the outer table's first cell, then build
    # the inner table there.
    b.move_to(outer_cell.first_paragraph)
    b.insert_cell()
    b.writeln("Inner Table Cell 1")
    b.insert_cell()
    b.writeln("Inner Table Cell 2")
    b.end_table()

    document.save(ARTIFACTS_DIR + "WorkingWithTables.nested_table.docx")
    #ExEnd:NestedTable
def test_combine_rows(self):
    #ExStart:CombineRows
    document = aw.Document(MY_DIR + "Tables.docx")

    # Rows of the second table are appended to the end of the first table.
    target = document.get_child(aw.NodeType.TABLE, 0, True).as_table()
    source = document.get_child(aw.NodeType.TABLE, 1, True).as_table()

    # Move rows one by one; tables with different cell counts and widths can
    # still be joined into one table this way. Adding a row reparents it, so
    # the source table empties out as the loop runs.
    while source.has_child_nodes:
        target.rows.add(source.first_row)
    source.remove()

    document.save(ARTIFACTS_DIR + "WorkingWithTables.combine_rows.docx")
    #ExEnd:CombineRows
def test_split_table(self):
    #ExStart:SplitTable
    doc = aw.Document(MY_DIR + "Tables.docx")
    first_table = doc.get_child(aw.NodeType.TABLE, 0, True).as_table()

    # We will split the table at the third row (inclusive).
    row = first_table.rows[2]

    # Create a new container for the split table (shallow clone: no rows).
    table = first_table.clone(False).as_table()

    # Insert the container after the original.
    first_table.parent_node.insert_after(table, first_table)

    # Add a buffer paragraph to ensure the tables stay apart.
    first_table.parent_node.insert_after(aw.Paragraph(doc), first_table)

    # Move rows one at a time from the tail of the original table into the
    # new one; prepending preserves their relative order. Stop once the
    # split row itself has been moved.
    while True:
        current_row = first_table.last_row
        table.prepend_child(current_row)
        if current_row == row:
            break

    doc.save(ARTIFACTS_DIR + "WorkingWithTables.split_table.docx")
    #ExEnd:SplitTable
def test_row_format_disable_break_across_pages(self):
    #ExStart:RowFormatDisableBreakAcrossPages
    document = aw.Document(MY_DIR + "Table spanning two pages.docx")
    table = document.get_child(aw.NodeType.TABLE, 0, True).as_table()

    # Keep every individual row on a single page.
    for table_row in table.rows:
        table_row.as_row().row_format.allow_break_across_pages = False

    document.save(ARTIFACTS_DIR + "WorkingWithTables.row_format_disable_break_across_pages.docx")
    #ExEnd:RowFormatDisableBreakAcrossPages
def test_keep_table_together(self):
    #ExStart:KeepTableTogether
    document = aw.Document(MY_DIR + "Table spanning two pages.docx")
    table = document.get_child(aw.NodeType.TABLE, 0, True).as_table()

    # Setting keep_with_next on every paragraph of the table prevents it from
    # breaking across a page; the closing paragraphs of the last row must be
    # left alone, or the paragraph after the table would be dragged along.
    for node in table.get_child_nodes(aw.NodeType.CELL, True):
        current_cell = node.as_cell()
        current_cell.ensure_minimum()
        in_last_row = current_cell.parent_row.is_last_row
        for paragraph in current_cell.paragraphs:
            paragraph = paragraph.as_paragraph()
            if in_last_row and paragraph.is_end_of_cell:
                continue
            paragraph.paragraph_format.keep_with_next = True

    document.save(ARTIFACTS_DIR + "WorkingWithTables.keep_table_together.docx")
    #ExEnd:KeepTableTogether
def test_check_cells_merged(self):
    #ExStart:CheckCellsMerged
    document = aw.Document(MY_DIR + "Table with merged cells.docx")
    table = document.get_child(aw.NodeType.TABLE, 0, True).as_table()

    # Report the merge state of every cell in the table.
    for table_row in table.rows:
        for table_cell in table_row.as_row().cells:
            print(self.print_cell_merge_type(table_cell.as_cell()))
    #ExEnd:CheckCellsMerged
#ExStart:PrintCellMergeType
@staticmethod
def print_cell_merge_type(cell: aw.tables.Cell) -> str:
    """Return a human-readable description of the merge state of *cell*.

    A cell participates in a horizontal and/or vertical merge whenever the
    corresponding merge flag on its cell format differs from CellMerge.NONE.
    """
    is_horizontally_merged = cell.cell_format.horizontal_merge != aw.tables.CellMerge.NONE
    is_vertically_merged = cell.cell_format.vertical_merge != aw.tables.CellMerge.NONE
    # 1-based "R<row>, C<column>" position of the cell within its table.
    cell_location = f"R{cell.parent_row.parent_table.index_of(cell.parent_row) + 1}, C{cell.parent_row.index_of(cell) + 1}"

    # Fix: messages previously ended inconsistently (some with a trailing
    # period, some without); all four now end with a period.
    if is_horizontally_merged and is_vertically_merged:
        return f"The cell at {cell_location} is both horizontally and vertically merged."
    if is_horizontally_merged:
        return f"The cell at {cell_location} is horizontally merged."
    if is_vertically_merged:
        return f"The cell at {cell_location} is vertically merged."
    return f"The cell at {cell_location} is not merged."
#ExEnd:PrintCellMergeType
def test_vertical_merge(self):
    #ExStart:VerticalMerge
    document = aw.Document()
    b = aw.DocumentBuilder(document)

    # Row 1: the first cell opens a vertical merge, the second stands alone.
    b.insert_cell()
    b.cell_format.vertical_merge = aw.tables.CellMerge.FIRST
    b.write("Text in merged cells.")
    b.insert_cell()
    b.cell_format.vertical_merge = aw.tables.CellMerge.NONE
    b.write("Text in one cell")
    b.end_row()

    # Row 2: the first cell continues the merge above and is left empty.
    b.insert_cell()
    b.cell_format.vertical_merge = aw.tables.CellMerge.PREVIOUS
    b.insert_cell()
    b.cell_format.vertical_merge = aw.tables.CellMerge.NONE
    b.write("Text in another cell")
    b.end_row()
    b.end_table()

    document.save(ARTIFACTS_DIR + "WorkingWithTables.vertical_merge.docx")
    #ExEnd:VerticalMerge
def test_horizontal_merge(self):
    #ExStart:HorizontalMerge
    document = aw.Document()
    b = aw.DocumentBuilder(document)

    # Row 1: the first cell opens a horizontal merge; the second continues it
    # and is therefore left empty.
    b.insert_cell()
    b.cell_format.horizontal_merge = aw.tables.CellMerge.FIRST
    b.write("Text in merged cells.")
    b.insert_cell()
    b.cell_format.horizontal_merge = aw.tables.CellMerge.PREVIOUS
    b.end_row()

    # Row 2: two ordinary, unmerged cells.
    b.insert_cell()
    b.cell_format.horizontal_merge = aw.tables.CellMerge.NONE
    b.write("Text in one cell.")
    b.insert_cell()
    b.write("Text in another cell.")
    b.end_row()
    b.end_table()

    document.save(ARTIFACTS_DIR + "WorkingWithTables.horizontal_merge.docx")
    #ExEnd:HorizontalMerge
def test_merge_cell_range(self):
    #ExStart:MergeCellRange
    document = aw.Document(MY_DIR + "Table with merged cells.docx")
    table = document.first_section.body.tables[0]

    # Merge every cell in the rectangle spanned by these two corner cells.
    start = table.rows[0].cells[0]
    end = table.rows[1].cells[1]
    self.merge_cells(start, end)

    document.save(ARTIFACTS_DIR + "WorkingWithTables.merge_cell_range.docx")
    #ExEnd:MergeCellRange
def test_convert_to_horizontally_merged_cells(self):
    #ExStart:ConvertToHorizontallyMergedCells
    document = aw.Document(MY_DIR + "Table with merged cells.docx")
    table = document.first_section.body.tables[0]
    # Rewrite the table so that merged cells carry explicit merge flags.
    table.convert_to_horizontally_merged_cells()
    #ExEnd:ConvertToHorizontallyMergedCells
#ExStart:MergeCells
@staticmethod
def merge_cells(start_cell: aw.tables.Cell, end_cell: aw.tables.Cell):
    """Merge the rectangular block of cells spanned by *start_cell* and *end_cell* (inclusive)."""
    parent_table = start_cell.parent_row.parent_table

    # Find the row and cell indices for the start and end cell.
    # Point.x is the cell (column) index; Point.y is the row index.
    start_cell_pos = drawing.Point(start_cell.parent_row.index_of(start_cell), parent_table.index_of(start_cell.parent_row))
    end_cell_pos = drawing.Point(end_cell.parent_row.index_of(end_cell), parent_table.index_of(end_cell.parent_row))

    # Create a range of cells to be merged based on these indices.
    # Inverse each index if the end cell is before the start cell, so the
    # rectangle is well-formed regardless of which corner was passed first.
    merge_range = drawing.Rectangle(
        min(start_cell_pos.x, end_cell_pos.x),
        min(start_cell_pos.y, end_cell_pos.y),
        abs(end_cell_pos.x - start_cell_pos.x) + 1,
        abs(end_cell_pos.y - start_cell_pos.y) + 1)

    for row in parent_table.rows:
        row = row.as_row()
        for cell in row.cells:
            cell = cell.as_cell()
            current_pos = drawing.Point(row.index_of(cell), parent_table.index_of(row))

            # Check if the current cell is inside our merge range, then merge it.
            # The cell on the range's left/top edge gets FIRST; all others PREVIOUS.
            if merge_range.contains(current_pos):
                cell.cell_format.horizontal_merge = aw.tables.CellMerge.FIRST if current_pos.x == merge_range.x else aw.tables.CellMerge.PREVIOUS
                cell.cell_format.vertical_merge = aw.tables.CellMerge.FIRST if current_pos.y == merge_range.y else aw.tables.CellMerge.PREVIOUS
#ExEnd:MergeCells
def test_repeat_rows_on_subsequent_pages(self):
    #ExStart:RepeatRowsOnSubsequentPages
    document = aw.Document()
    b = aw.DocumentBuilder(document)

    b.start_table()

    # Two heading rows, flagged to repeat at the top of every page.
    b.row_format.heading_format = True
    b.paragraph_format.alignment = aw.ParagraphAlignment.CENTER
    b.cell_format.width = 100
    b.insert_cell()
    b.writeln("Heading row 1")
    b.end_row()
    b.insert_cell()
    b.writeln("Heading row 2")
    b.end_row()

    b.cell_format.width = 50
    b.paragraph_format.clear_formatting()

    # Enough body rows (not heading-formatted) to spill onto further pages.
    for _ in range(50):
        b.insert_cell()
        b.row_format.heading_format = False
        b.write("Column 1 Text")
        b.insert_cell()
        b.write("Column 2 Text")
        b.end_row()

    document.save(ARTIFACTS_DIR + "WorkingWithTables.repeat_rows_on_subsequent_pages.docx")
    #ExEnd:RepeatRowsOnSubsequentPages
def test_auto_fit_to_page_width(self):
    #ExStart:AutoFitToPageWidth
    document = aw.Document()
    b = aw.DocumentBuilder(document)

    # A table whose preferred width is 50% of the available page width.
    table = b.start_table()
    b.insert_cell()
    table.preferred_width = aw.tables.PreferredWidth.from_percent(50)
    b.writeln("Cell #1")
    b.insert_cell()
    b.writeln("Cell #2")
    b.insert_cell()
    b.writeln("Cell #3")

    document.save(ARTIFACTS_DIR + "WorkingWithTables.auto_fit_to_page_width.docx")
    #ExEnd:AutoFitToPageWidth
def test_preferred_width_settings(self):
#ExStart:PreferredWidthSettings
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert a table row made up of three cells which have different preferred widths.
builder.start_table()
# Insert an absolute sized cell.
builder.insert_cell()
builder.cell_format.preferred_width = aw.tables.PreferredWidth.from_points(40)
builder.cell_format.shading.background_pattern_color = drawing.Color.light_yellow
builder.writeln("Cell at 40 points width")
# Insert a relative (percent) sized cell.
builder.insert_cell()
builder.cell_format.preferred_width = aw.tables.PreferredWidth.from_percent(20)
builder.cell_format.shading.background_pattern_color = drawing.Color.light_blue
builder.writeln("Cell at 20% width")
# Insert a auto sized cell.
builder.insert_cell()
builder.cell_format.preferred_width = aw.tables.PreferredWidth.AUTO
builder.cell_format.shading.background_pattern_color = drawing.Color.light_green
builder.writeln(
"Cell automatically sized. The size of this cell is calculated from the table preferred width.")
builder.writeln("In this case the cell will fill up the rest of the available space.")
doc.save(ARTIFACTS_DIR + "WorkingWithTables.preferred_width_settings.docx")
#ExEnd:PreferredWidthSettings
def test_retrieve_preferred_width_type(self):
#ExStart:RetrievePreferredWidthType
doc = aw.Document(MY_DIR + "Tables.docx")
table = doc.get_child(aw.NodeType.TABLE, 0, True).as_table()
#ExStart:AllowAutoFit
table.allow_auto_fit = True
#ExEnd:AllowAutoFit
first_cell = table.first_row.first_cell
type_ = first_cell.cell_format.preferred_width.type
value = first_cell.cell_format.preferred_width.value
#ExEnd:RetrievePreferredWidthType
def test_get_table_position(self):
#ExStart:GetTablePosition
doc = aw.Document(MY_DIR + "Tables.docx")
table = doc.get_child(aw.NodeType.TABLE, 0, True).as_table()
if table.text_wrapping == aw.tables.TextWrapping.AROUND:
print(table.relative_horizontal_alignment)
print(table.relative_vertical_alignment)
else:
print(table.alignment)
#ExEnd:GetTablePosition
def test_get_floating_table_position(self):
#ExStart:GetFloatingTablePosition
doc = aw.Document(MY_DIR + "Table wrapped by text.docx")
for table in doc.first_section.body.tables:
table = table.as_table()
# If the table is floating type, then print its positioning properties.
if table.text_wrapping == aw.tables.TextWrapping.AROUND:
print(table.horizontal_anchor)
print(table.vertical_anchor)
print(table.absolute_horizontal_distance)
print(table.absolute_vertical_distance)
print(table.allow_overlap)
print(table.absolute_horizontal_distance)
print(table.relative_vertical_alignment)
print("..............................")
#ExEnd:GetFloatingTablePosition
def test_floating_table_position(self):
#ExStart:FloatingTablePosition
doc = aw.Document(MY_DIR + "Table wrapped | |
<gh_stars>10-100
from nose import SkipTest
from nose.tools import assert_less_equal, assert_almost_equal, assert_equal
from numpy.ma.testutils import assert_close
from numpy.testing.utils import assert_allclose
from kernel_exp_family.estimators.finite.develop.gaussian import compute_b_memory, \
compute_C_memory, _objective_sym_completely_manual, \
_objective_sym_half_manual, update_b_single, update_L_C_single
from kernel_exp_family.estimators.finite.gaussian import fit, objective, \
compute_b, compute_C, update_C, KernelExpFiniteGaussian, \
update_b_L_C_weighted
from kernel_exp_family.kernels.kernels import rff_feature_map_grad2_d, \
rff_feature_map_grad_d, theano_available, rff_sample_basis
from kernel_exp_family.tools.numerics import log_sum_exp
import numpy as np
def test_compute_b_storage_1d1n():
    """compute_b_memory matches the manual gradient term for one 1-D point."""
    X = np.array([[1.]])
    u = np.array([2.])
    omega = np.array([[2.]])
    dim = 0
    expected = -rff_feature_map_grad2_d(X, omega, u, dim).flatten()
    assert_allclose(expected, compute_b_memory(X, omega, u))
def test_compute_b_storage_1d2n():
    """compute_b_memory matches the averaged manual gradient for two 1-D points."""
    X = np.array([[1.], [2.]])
    u = np.array([2.])
    omega = np.array([[2.]])
    dim = 0
    expected = -np.mean(rff_feature_map_grad2_d(X, omega, u, dim))
    assert_allclose(expected, compute_b_memory(X, omega, u))
def test_compute_C_1d1n():
    """compute_C_memory matches the manual outer product for one 1-D point."""
    X = np.array([[1.]])
    u = np.array([2.])
    omega = np.array([[2.]])
    grad = rff_feature_map_grad_d(X, omega, u, 0).flatten()
    assert_allclose(np.outer(grad, grad), compute_C_memory(X, omega, u))
def test_compute_C_1d2n():
    """compute_C_memory matches the averaged squared gradient for two 1-D points."""
    X = np.array([[1.], [2.]])
    u = np.array([2.])
    omega = np.array([[2.]])
    expected = np.mean(rff_feature_map_grad_d(X, omega, u, 0) ** 2)
    assert_allclose(expected, compute_C_memory(X, omega, u))
def test_fit():
    """fit() solves the linear system C * theta = b."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    expected = np.linalg.solve(compute_C_memory(X, omega, u),
                               compute_b_memory(X, omega, u))
    assert_allclose(fit(X, omega, u), expected)
def test_objective_sym_given_b_C():
    """objective() with precomputed b, C equals the explicit quadratic form."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    C = compute_C_memory(X, omega, u)
    b = compute_b_memory(X, omega, u)
    theta = np.random.randn(m)
    quadratic = 0.5 * np.dot(theta.T, np.dot(C, theta)) - np.dot(theta, b)
    assert_close(objective(X, theta, omega, u, b, C), quadratic)
def test_objective_sym_given_b_C_equals_given_nothing():
    """objective() gives the same value with and without precomputed b, C."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    C = compute_C_memory(X, omega, u)
    b = compute_b_memory(X, omega, u)
    theta = np.random.randn(m)
    assert_close(objective(X, theta, omega, u, b, C),
                 objective(X, theta, omega, u))
def test_objective_sym_equals_completely_manual_manually():
    """Cross-check objective() against a fully hand-written computation.

    For every data point, the b and C contributions are accumulated dimension
    by dimension and compared against the feature-map helpers and the
    memory-based routines, then the averaged objective is compared overall.
    """
    N = 100
    D = 3
    m = 3
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    theta = np.random.randn(m)
    J_manual = 0.
    for n in range(N):
        b_manual = np.zeros(m)
        C_manual = np.zeros((m, m))
        J_n_manual = 0.
        for d in range(D):
            b_term_manual = -np.sqrt(2. / m) * np.cos(np.dot(X[n], omega) + u) * (omega[d, :] ** 2)
            b_term = rff_feature_map_grad2_d(X[n], omega, u, d)
            assert_allclose(b_term_manual, b_term)
            b_manual -= b_term_manual
            J_manual += np.dot(b_term_manual, theta)
            J_n_manual += np.dot(b_term_manual, theta)
            c_vec_manual = -np.sqrt(2. / m) * np.sin(np.dot(X[n], omega) + u) * omega[d, :]
            c_vec = rff_feature_map_grad_d(X[n], omega, u, d)
            assert_allclose(c_vec_manual, c_vec)
            C_term = np.outer(c_vec_manual, c_vec_manual)
            C_manual += C_term
            # not regularised here, done afterwards
            J_manual += 0.5 * np.dot(theta, np.dot(C_term, theta))
            J_n_manual += 0.5 * np.dot(theta, np.dot(C_term, theta))
        # Fix: a single point is a (1, D) row. The original reshaped to
        # (1, m), which only worked because m == D in this test.
        x_row = X[n].reshape(1, D)
        b = compute_b_memory(x_row, omega, u)
        C = compute_C_memory(x_row, omega, u)
        assert_allclose(b_manual, b)
        assert_allclose(C_manual, C)
        # discard regularisation for these internal checks
        J_n = objective(x_row, theta, omega, u)
        J_n_2 = 0.5 * np.dot(theta, np.dot(C, theta)) - np.dot(theta, b)
        assert_allclose(J_n_2, J_n, rtol=1e-4)
        assert_allclose(J_n_manual, J_n, rtol=1e-4)
    J_manual /= N
    J = objective(X, theta, omega, u)
    assert_close(J, J_manual, decimal=5)
def test_objective_sym_equals_completely_manual():
    """objective() agrees with the fully manual reference implementation."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    theta = np.random.randn(m)
    J_reference = _objective_sym_completely_manual(X, theta, omega, u)
    assert_close(J_reference, objective(X, theta, omega, u), decimal=5)
def test_objective_sym_equals_half_manual():
    """objective() agrees with the half-manual reference implementation."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    theta = np.random.randn(m)
    J_reference = _objective_sym_half_manual(X, theta, omega, u)
    assert_close(J_reference, objective(X, theta, omega, u))
# import matplotlib.pyplot as plt
def test_fit_returns_min_1d_grid():
    """The fitted single coefficient minimises the objective on a 1-D grid."""
    N, D, m = 100, 3, 1
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    C = compute_C_memory(X, omega, u)
    b = compute_b_memory(X, omega, u)
    theta = fit(X, omega, u)
    J_opt = objective(X, theta, omega, u, b, C)
    # evaluate the objective on a grid of +-3 around the fitted value
    grid = np.linspace(theta - 3, theta + 3)
    J_grid = np.zeros(len(grid))
    for i, candidate in enumerate(grid):
        J_grid[i] = objective(X, np.array([candidate]), omega, u, b, C)
    spacing = grid[1] - grid[0]
    assert_almost_equal(J_grid.min(), J_opt, delta=spacing)
    assert_almost_equal(grid[J_grid.argmin()], theta[0], delta=spacing)
def test_fit_returns_min_random_search():
    """Random perturbations of the fitted theta never decrease the objective."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    C = compute_C_memory(X, omega, u)
    b = compute_b_memory(X, omega, u)
    theta = fit(X, omega, u)
    J_opt = objective(X, theta, omega, u, b, C)
    for noise_scale in [0.0001, 0.001, 0.1, 1, 10, 100]:
        for _ in range(10):
            perturbed = np.random.randn(m) * noise_scale + theta
            assert_less_equal(J_opt, objective(X, perturbed, omega, u, b, C))
def test_compute_b_equals_compute_b_memory():
    """Low-memory compute_b matches the memory-based implementation."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    assert_allclose(compute_b(X, omega, u), compute_b_memory(X, omega, u))
def test_compute_C_equals_compute_C_memory():
    """Low-memory compute_C matches the memory-based implementation."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    assert_allclose(compute_C(X, omega, u), compute_C_memory(X, omega, u),
                    rtol=1e-4)
def test_update_b_single_equals_batch():
    """Updating b with one extra point equals recomputing b from scratch."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    x_new = np.random.randn(D)
    b_updated = update_b_single(x_new, compute_b(X, omega, u),
                                n=N, omega=omega, u=u)
    assert_allclose(b_updated, compute_b(np.vstack((X, x_new)), omega, u))
def test_update_C_equals_batch():
    """Updating C with one extra point equals recomputing C from scratch."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    x_new = np.random.randn(D)
    C_updated = update_C(x_new, compute_C(X, omega, u), n=N, omega=omega, u=u)
    assert_allclose(C_updated, compute_C(np.vstack((X, x_new)), omega, u))
def test_update_b_L_C_weighted_equals_compute_b_and_compute_L_C_constant_weights():
    """Weighted online update with unit weights matches the batch solution."""
    N, D, m = 2, 2, 2
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X1 = np.random.randn(N, D)
    X2 = np.random.randn(N, D)
    # unit weights, expressed in log-space as required by the API
    log_weights1 = np.log(np.ones(N))
    log_weights2 = np.log(np.ones(N))
    both = np.vstack((X1, X2))
    b_batch = compute_b(both, omega, u)
    L_C_batch = np.linalg.cholesky(compute_C(both, omega, u))
    # start from X1 only, then fold in X2 via the weighted update
    b_online = compute_b(X1, omega, u)
    L_C_online = np.linalg.cholesky(compute_C(X1, omega, u))
    b_online, L_C_online = update_b_L_C_weighted(
        X2, b_online, L_C_online, log_sum_exp(log_weights1), log_weights2,
        omega, u)
    assert_allclose(b_batch, b_online)
    assert_allclose(L_C_batch, L_C_online)
def test_update_L_C_naive_equals_batch():
    """Single-point Cholesky update (keyword args) matches the batch factor."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    x_new = np.random.randn(D)
    L_C_updated = update_L_C_single(
        x_new, np.linalg.cholesky(compute_C(X, omega, u)),
        n=N, omega=omega, u=u)
    expected = np.linalg.cholesky(compute_C(np.vstack((X, x_new)), omega, u))
    assert_allclose(L_C_updated, expected)
def test_update_L_C_equals_batch():
    """Single-point Cholesky update (positional args) matches the batch factor."""
    N, D, m = 100, 3, 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)
    x_new = np.random.randn(D)
    L_C_updated = update_L_C_single(
        x_new, np.linalg.cholesky(compute_C(X, omega, u)), N, omega, u)
    expected = np.linalg.cholesky(compute_C(np.vstack((X, x_new)), omega, u))
    assert_allclose(L_C_updated, expected)
def test_hessian_execute():
    """hessian() runs without error on a fitted finite Gaussian estimator."""
    if not theano_available:
        raise SkipTest("Theano not available.")
    sigma, lmbda = 1., 1.
    N, D, m = 100, 2, 10
    X = np.random.randn(N, D)
    estimator = KernelExpFiniteGaussian(sigma, lmbda, m, D)
    estimator.fit(X)
    estimator.hessian(X[0])
def test_third_order_derivative_tensor_execute():
    """third_order_derivative_tensor() runs without error after fitting."""
    if not theano_available:
        raise SkipTest("Theano not available.")
    sigma, lmbda = 1., 1.
    N, D, m = 100, 2, 10
    X = np.random.randn(N, D)
    estimator = KernelExpFiniteGaussian(sigma, lmbda, m, D)
    estimator.fit(X)
    estimator.third_order_derivative_tensor(X[0])
def test_update_b_single_equals_compute_b_when_initialised_correctly():
sigma = 1.
N = 200
D = 2
m = 10
X = np.random.randn(N, D)
# basis
omega, u = rff_sample_basis(D, m, sigma)
# initial fit and update
b_update = np.zeros(m)
n_update = m
for x in X:
b_update = update_b_single(x, b_update, n_update, omega, u)
n_update += 1
# initial fit and batch (average of "fake" b | |
The axes
are temporarily set to values appropriate for the spectrogram, then the
new background (containing the plotted spectrogram) is grabbed and axes
are restored to a scale appropriate for all Track related features.
TODO -- look into automatic zero-padding to fix scaling issues?
TODO -- condense restart branches into one set of code
TODO -- implement more clear system for setting limits and duration...
"""
if restart == False:
self.current_waveform = waveform
self.current_fs = fs
self.y_high = fs/2
self.ax.clear()
self.tracks = []
self.ax.specgram(self.current_waveform, NFFT=window_len, Fs=self.current_fs,\
noverlap=int(window_len*noverlap), window=window_type(window_len),
cmap=plt.cm.gist_heat)
self.fig.canvas.draw()
self.getBackground()
for i in range(len(tracks)):
self.tracks.append(self.ax.plot(tracks[i].points, color="blue", marker="o"))
self.updateCanvas(redraw=True)
elif restart == True:
self.ax.clear()
self.ax.specgram(self.current_waveform, NFFT=window_len, Fs=self.current_fs,\
noverlap=int(window_len*noverlap), window=window_type(window_len),
cmap=plt.cm.gist_heat)
self.fig.canvas.draw()
self.background = self.fig.canvas.copy_from_bbox(self.ax.get_figure().bbox)
self.updateCanvas(redraw=True)
class F0Canvas(trackCanvas):
    """
    Contains track representing an F0 contour.
    Attributes:
        See trackCanvas' doc string.
        y_low (int) -- modified from trackCanvas, set to 90 as the lower margin
            for possible F0.
        y_high (int) -- modified from trackCanvas, set to 150 as the upper
            margin for possible F0.
    F0Canvas simply changes a handful of default values from trackCanvas, and
    otherwise functions as a typical trackCanvas.
    """
    def __init__(self, parent=None):
        trackCanvas.__init__(self, parent=parent)
        # Set F0Canvas unique plot settings
        # (no x axis labels; tighter horizontal margins than the parent)
        self.ax.xaxis.set_visible(False)
        self.fig.subplots_adjust(left=0.08, right=0.95)
        # Initialize F0Canvas unique attributes, or adjust defaults
        # (plot limits bounding the plausible F0 range, in Hz)
        self.y_low = 90
        self.y_high = 150
class DisplayDock(QDockWidget):
    """
    Contains interface/controls for all main window display parameters.
    Arguments:
        parent (QWidget or None) -- parent widget passed to QDockWidget.
    Attributes:
        loadedRadioButton (QRadioButton) -- if checked, results in loaded
            waveform being the one displayed/analyzed/played.
        synthedRadioButton (QRadioButton) -- if checked, results in synthed
            waveform being the one displayed/analyzed/played.
        waveCheckBox (QCheckBox) -- if checked, wave_cv is shown.
        STFTCheckBox (QCheckBox) -- if checked, stft_cv is shown.
        showFTCheckBox (QCheckBox) -- if checked, formant tracks are shown.
        clearButton (QButton) -- if pressed, all plots are cleared.
        trackGroup (SliderGroup2) -- group of sliders to control various track
            parameters.
        trackBubbleCheckBox (QCheckBox) -- checkbox which allows track bubbles
            to be enabled or disabled.
    DisplayDock stores all user interface mechanisms that allow for changing
    display-related parameters. Most widgets here which are attributes are
    attributes so they can be referred to/accessed in main or in
    TrackDrawSlots.
    """
    def __init__(self, parent=None):
        super(DisplayDock, self).__init__(parent)
        self.setWindowTitle("Display settings")
        ### Select display group -- choose which waveform is active
        dispGroupBox = QGroupBox("Display")
        dispGroupVBox = QVBoxLayout()
        dispGroupBox.setLayout(dispGroupVBox)
        self.loadedRadioButton = QRadioButton("Loaded sound")
        self.loadedRadioButton.setChecked(True)  # loaded sound is the default
        self.synthedRadioButton = QRadioButton("Synthesized sound")
        dispGroupVBox.addWidget(self.loadedRadioButton)
        dispGroupVBox.addWidget(self.synthedRadioButton)
        ### Visibility toggles, all shown by default
        self.waveCheckBox = QCheckBox("Show waveform")
        self.waveCheckBox.setChecked(True)
        self.STFTCheckBox = QCheckBox("Show STFT")
        self.STFTCheckBox.setChecked(True)
        self.showFTCheckBox = QCheckBox("Show formant tracks")
        self.showFTCheckBox.setChecked(True)
        ### Clear plots button (slot is connected elsewhere)
        self.clearButton = QPushButton("Clear plots (Ctrl+L)")
        self.clearButton.setToolTip("Clear all plots")
        self.clearButton.setStatusTip("Clear all plots")
        ### Track parameter sliders, seeded from DEFAULT_PARAMS
        self.trackGroup = SliderGroup2(\
                keys=["Number of points", "Bubble size"],
                units=["", "Hz"],
                mins=[20, 50],
                maxs=[100, 500],
                values=[DEFAULT_PARAMS.track_npoints,
                        DEFAULT_PARAMS.bubble_len])
        self.trackBubbleCheckBox = QCheckBox("Use track bubbles")
        self.trackBubbleCheckBox.setChecked(False)
        ### Set up main widget -- stack all controls vertically in the dock
        mainWidget = QWidget()
        mainVBox = QVBoxLayout()
        mainWidget.setLayout(mainVBox)
        mainVBox.addWidget(dispGroupBox)
        mainVBox.addWidget(self.waveCheckBox)
        mainVBox.addWidget(self.STFTCheckBox)
        mainVBox.addWidget(self.showFTCheckBox)
        mainVBox.addWidget(self.clearButton)
        mainVBox.addWidget(self.trackGroup)
        mainVBox.addWidget(self.trackBubbleCheckBox)
        mainVBox.addStretch()
        self.setWidget(mainWidget)
class AnalysisDock(QDockWidget):
    """
    Contains interface/controls for all analysis parameters.
    Arguments:
        parent (QWidget or None) -- parent widget passed to QDockWidget.
    Attributes:
        methodComboBox (QComboBox) -- combobox which allows the user to select
            the type of analysis to be performed.
        specGroup (QGroupBox) -- groupbox containing the user interface for
            spectrogram parameters.
        windowComboBox (QComboBox) -- combobox which allows user to select the
            type of window to use in the spectrogram, part of specGroup.
        spectrogramGroup (SliderGroup2) -- group of sliders to control various
            spectrogram parameters.
        waveletGroup (QGroupBox) -- groupbox containing the user interface for
            wavelet analysis parameters.
        applyButton (QButton) -- button which applies any updated analysis.
    AnalysisDock stores all user interface mechanisms that allow for changing
    analysis-related parameters. Whenever the methodComboBox is changed, the
    dock displays the appropriate Group associated with the selected synthesis
    type (i.e. specGroup is displayed if Spectrogram is chosen as the analysis
    type). Most widgets here which are attributes are attributes so they can
    be referred to/accessed in main or in TrackDrawSlots.
    """
    def __init__(self, parent=None):
        super(AnalysisDock, self).__init__(parent)
        self.setWindowTitle("Analysis settings")
        ### Select analysis method group
        methodGroup = QWidget()
        methodVBox = QVBoxLayout()
        methodGroup.setLayout(methodVBox)
        resample_fs = DEFAULT_PARAMS.resample_fs
        resampleLabel = QLabel("Resample rate: " + str(resample_fs) + " Hz")
        methodLabel = QLabel("Method:")
        self.methodComboBox = QComboBox()
        self.methodComboBox.addItems(["Spectrogram", "Wavelet"])
        self.methodComboBox.setCurrentIndex(0)  # Spectrogram by default
        self.methodComboBox.currentIndexChanged.connect(self.changeAnalysis)
        methodVBox.addWidget(resampleLabel)
        methodVBox.addSpacing(15)
        methodVBox.addWidget(methodLabel)
        methodVBox.addWidget(self.methodComboBox)
        ###
        ### Spectrogram settings group box
        self.specGroup = QGroupBox("Spectrogram settings")
        specVBox = QVBoxLayout()
        self.specGroup.setLayout(specVBox)
        windowGroup = QWidget()
        windowVBox = QVBoxLayout()
        windowGroup.setLayout(windowVBox)
        windowLabel = QLabel("Window function:")
        self.windowComboBox = QComboBox()
        self.windowComboBox.addItems(["Hamming", "Bartlett", "Blackman"])
        self.windowComboBox.setCurrentIndex(0)
        windowVBox.addWidget(windowLabel)
        windowVBox.addWidget(self.windowComboBox)
        self.spectrogramGroup = SliderGroup2(\
                keys=["Frame size", "Frame overlap", "Threshold"],
                units=["Samples", "%", "dB"],
                mins=[64, 0, 0],
                maxs=[1024, 99, 10],
                values=[DEFAULT_PARAMS.window_len, DEFAULT_PARAMS.noverlap,
                        DEFAULT_PARAMS.threshold])
        # NOTE(review): reassignCheckBox is a local, not stored on self, so
        # its state cannot be queried after __init__ -- confirm intended.
        reassignCheckBox = QCheckBox("T-F reassignment")
        specVBox.addWidget(windowGroup)
        specVBox.addWidget(self.spectrogramGroup)
        specVBox.addWidget(reassignCheckBox)
        ###
        ### Wavelet settings group box (currently empty placeholder UI)
        self.waveletGroup = QGroupBox("Wavelet settings")
        waveletVBox = QVBoxLayout()
        self.waveletGroup.setLayout(waveletVBox)
        settingGroup = QWidget()
        settingVBox = QVBoxLayout(settingGroup)
        waveletVBox.addWidget(settingGroup)
        ###
        ### Apply button (slot is connected elsewhere)
        self.applyButton = QPushButton("Apply settings (Ctrl+R)")
        self.applyButton.setToolTip("Apply analysis settings")
        self.applyButton.setStatusTip("Apply analysis settings")
        ###
        ### Set up main widget
        mainWidget = QWidget()
        mainVBox = QVBoxLayout()
        mainWidget.setLayout(mainVBox)
        mainVBox.addWidget(methodGroup)
        mainVBox.addWidget(self.specGroup)
        mainVBox.addWidget(self.waveletGroup)
        self.waveletGroup.setHidden(True)  # only one settings group visible
        mainVBox.addWidget(self.applyButton)
        mainVBox.addStretch()
        self.setWidget(mainWidget)
        ###
    @pyqtSlot()
    def changeAnalysis(self):
        """Show the settings group matching the selected analysis method."""
        currIdx = self.methodComboBox.currentIndex()
        if currIdx == 0:
            # Spectrogram selected
            self.specGroup.setHidden(False)
            self.waveletGroup.setHidden(True)
        elif currIdx == 1:
            # Wavelet selected
            self.specGroup.setHidden(True)
            self.waveletGroup.setHidden(False)
class SynthesisDock(QDockWidget):
    """
    Contains interface/controls for all synthesis parameters.
    Arguments:
        parent (QWidget or None) -- parent widget passed to QDockWidget.
    Attributes:
        methodComboBox (QComboBox) -- combobox allowing for selection of
            type of synthesis to be used
        nformantComboBox (QComboBox) -- combobox allowing for selection of
            number of formants to be synthesized.
        klattGroup (QGroupBox) -- groupbox for Klatt synthesizer parameters
        sineGroup (QGroupBox) -- groupbox for Sine wave synthesizer parameters
        amplitudeGroup (SliderGroup2) -- group of sliders to control different
            amplifier parameters in the Klatt synthesizer.
        FFBandwidthGroup (SliderGroup2) -- group of sliders to allow for
            selection formant bandwidths to be synthesized.
        synthButton (QButton) -- button for initiating synthesis using current
            settings.
    SynthesisDock stores all user interface mechanisms that allow for changing
    synthesis-related parameters. Whenever the methodComboBox is changed, the
    dock displays the appropriate Group associated with the selected synthesis
    type (i.e. klattGroup is displayed if Klatt 1980 is chosen as the synthesis
    type). Most widgets here which are attributes are attributes so they can
    be referred to/accessed in main or in TrackDrawSlots.
    """
    def __init__(self, parent=None):
        super(SynthesisDock, self).__init__(parent)
        self.setWindowTitle("Synthesis settings")
        ### Select synthesis method group
        methodGroup = QWidget()
        methodVBox = QVBoxLayout()
        methodGroup.setLayout(methodVBox)
        synthesis_fs = DEFAULT_PARAMS.synth_fs
        synthesisLabel = QLabel("Synthesis rate: " + str(synthesis_fs) + " Hz")
        methodLabel = QLabel("Method:")
        self.methodComboBox = QComboBox()
        self.methodComboBox.addItems(["Klatt 1980", "Sine wave"])
        self.methodComboBox.setCurrentIndex(0)  # Klatt 1980 by default
        self.methodComboBox.currentIndexChanged.connect(self.changeSynthesis)
        methodVBox.addWidget(synthesisLabel)
        methodVBox.addSpacing(15)
        methodVBox.addWidget(methodLabel)
        methodVBox.addWidget(self.methodComboBox)
        ### Number-of-formants selector (defaults to "5")
        nformantGroup = QWidget()
        nformantVBox = QVBoxLayout()
        nformantGroup.setLayout(nformantVBox)
        nformantLabel = QLabel("Number of formant tracks:")
        self.nformantComboBox = QComboBox()
        self.nformantComboBox.addItems(["1", "2", "3", "4", "5"])
        self.nformantComboBox.setCurrentIndex(4)
        nformantVBox.addWidget(nformantLabel)
        nformantVBox.addWidget(self.nformantComboBox)
        ### Klatt synthesis settings group box
        self.klattGroup = QGroupBox("Klatt synthesizer settings")
        klattVBox = QVBoxLayout()
        self.klattGroup.setLayout(klattVBox)
        # NOTE(review): voicingGroup is an empty container and a plain local;
        # presumably a placeholder for voicing controls -- confirm intended.
        voicingGroup = QWidget()
        voicingVBox = QVBoxLayout()
        voicingGroup.setLayout(voicingVBox)
        # Amplitude sliders seeded from the Klatt defaults (AV, AVS, AH, AF)
        self.amplitudeGroup = SliderGroup2(\
                keys=["Amplitude of voicing", "Amplitude of QS voicing",
                      "Amplitude of aspiration", "Amplitude of frication"],
                units=["dB", "dB", "dB", "dB"],
                mins=[0, 0, 0, 0],
                maxs=[40, 40, 40, 40],
                values=[DEFAULT_PARAMS.AV, DEFAULT_PARAMS.AVS,
                        DEFAULT_PARAMS.AH, DEFAULT_PARAMS.AF])
        # Formant bandwidth sliders seeded from the default bandwidth array
        self.FFBandwidthGroup = SliderGroup2(\
                keys=["F1 bandwidth", "F2 bandwidth", "F3 bandwidth",
                      "F4 bandwidth", "F5 bandwidth"],
                units=["Hz", "Hz", "Hz", "Hz", "Hz"],
                mins=[50, 50, 10, 50, 50],
                maxs=[250, 250, 250, 250, 250],
                values=[DEFAULT_PARAMS.BW[0], DEFAULT_PARAMS.BW[1],
                        DEFAULT_PARAMS.BW[2], DEFAULT_PARAMS.BW[3],
                        DEFAULT_PARAMS.BW[4]])
        klattVBox.addWidget(voicingGroup)
        klattVBox.addWidget(self.amplitudeGroup)
        klattVBox.addWidget(self.FFBandwidthGroup)
        ###
        ### Sine wave synthesis settings group box (currently empty placeholder)
        self.sineGroup = QGroupBox("Sine wave synthesizer settings")
        sineVBox = QVBoxLayout()
        self.sineGroup.setLayout(sineVBox)
        ###
        ### Synthesize button (slot is connected elsewhere)
        self.synthButton = QPushButton("Synthesize (Ctrl+Y)")
        self.synthButton.setToolTip("Synthesize using current settings")
        self.synthButton.setStatusTip("Synthesize using current settings")
        ###
        ### Set up main widget
        mainWidget = QWidget()
        mainVBox = QVBoxLayout()
        mainWidget.setLayout(mainVBox)
        mainVBox.addWidget(methodGroup)
        mainVBox.addWidget(nformantGroup)
        mainVBox.addWidget(self.klattGroup)
        mainVBox.addWidget(self.sineGroup)
        self.sineGroup.setHidden(True)  # only one settings group visible
        mainVBox.addWidget(self.synthButton)
        mainVBox.addStretch()
        self.setWidget(mainWidget)
        ###
    @pyqtSlot()
    def changeSynthesis(self):
        """Show the settings group matching the selected synthesis method."""
        currIdx = self.methodComboBox.currentIndex()
        if currIdx == 0:
            # Klatt 1980 selected
            self.klattGroup.setHidden(False)
            self.sineGroup.setHidden(True)
        elif currIdx == 1:
            # Sine wave selected
            self.klattGroup.setHidden(True)
            self.sineGroup.setHidden(False)
class SliderGroup(QWidget):
"""
A convenience widget for displaying slider information (minimum, maximum,
and current value). Set stepDouble=True to create a slider that doubles
its value each step.
"""
def __init__(self, parent=None, label="", units="", minimum=1, maximum=99,
value=1, stepSize=1, stepDouble=False, orientation=Qt.Horizontal):
super(SliderGroup, self).__init__(parent)
self.labelTxt = label
self.unitsTxt = units
self.stepSize = stepSize
self.stepDouble = stepDouble
if self.stepDouble:
self.currValue = 2**value
minLabel = QLabel(str(2**minimum))
maxLabel = QLabel(str(2**maximum))
else:
self.currValue = self.stepSize*value
minLabel = QLabel(str(self.stepSize*minimum))
maxLabel = QLabel(str(self.stepSize*maximum))
topContainer = QWidget()
topHBox = QHBoxLayout()
topContainer.setLayout(topHBox)
topTxt = self.labelTxt + " " + str(self.currValue)\
+ " " + self.unitsTxt
self.topLabel = QLabel(topTxt)
topHBox.addWidget(self.topLabel)
botContainer = QWidget()
botHBox | |
# flowbyfunctions.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Helper functions for flowbyactivity and flowbysector data
"""
import pandas as pd
import numpy as np
from esupy.dqi import get_weighted_average
import flowsa
from flowsa.common import fbs_activity_fields, sector_level_key, \
load_crosswalk, fbs_fill_na_dict, check_activities_sector_like, \
fbs_collapsed_default_grouping_fields, fbs_collapsed_fill_na_dict, \
fba_activity_fields, fba_default_grouping_fields, \
load_sector_length_cw_melt, fba_fill_na_dict, \
fba_mapped_default_grouping_fields
from flowsa.dataclean import clean_df, replace_strings_with_NoneType, \
replace_NoneType_with_empty_cells, standardize_units
from flowsa.location import US_FIPS, get_state_FIPS, \
get_county_FIPS, update_geoscale, fips_number_key
from flowsa.schema import flow_by_activity_fields, flow_by_sector_fields, \
flow_by_sector_collapsed_fields, flow_by_activity_mapped_fields
from flowsa.settings import log, vLogDetailed, vLog
def create_geoscale_list(df, geoscale, year='2015'):
    """
    Create a list of FIPS associated with given geoscale
    :param df: FlowBySector of FlowByActivity df
    :param geoscale: 'national', 'state', or 'county'
    :param year: str, year of FIPS, defaults to 2015
    :return: list of relevant FIPS
    """
    # national scale is a single FIPS code regardless of Location System
    if geoscale == "national":
        return [US_FIPS]
    # sub-national scales only apply to FIPS-based location systems
    if not df['LocationSystem'].str.contains('FIPS').any():
        return []
    if geoscale == "state":
        state_FIPS = get_state_FIPS(year)
        # exclude Puerto Rico (FIPS 72000)
        return list(state_FIPS[state_FIPS['FIPS'] != '72000']['FIPS'])
    if geoscale == "county":
        return list(get_county_FIPS(year)['FIPS'])
    return []
def filter_by_geoscale(df, geoscale):
    """
    Filter flowbyactivity by FIPS at the given scale
    :param df: Either flowbyactivity or flowbysector
    :param geoscale: string, either 'national', 'state', or 'county'
    :return: filtered flowbyactivity or flowbysector
    """
    relevant_fips = create_geoscale_list(df, geoscale)
    filtered = df[df['Location'].isin(relevant_fips)].reset_index(drop=True)
    # an empty result means the dataset reports nothing at this scale
    if filtered.empty:
        raise flowsa.exceptions.FBSMethodConstructionError(
            message="No flows found in the flow dataset at "
            f"the {geoscale} scale")
    return filtered
def agg_by_geoscale(df, from_scale, to_scale, groupbycols):
    """
    Aggregate a df by geoscale
    :param df: flowbyactivity or flowbysector df
    :param from_scale: str, geoscale to aggregate from
        ('national', 'state', 'county')
    :param to_scale: str, geoscale to aggregate to (
        'national', 'state', 'county')
    :param groupbycols: flowbyactivity or flowbysector default groupby columns
    :return: df, at identified to_scale geographic level
    """
    # keep only rows reported at the source scale, relabel their locations
    # to the target scale, then collapse the now-duplicated rows
    filtered = filter_by_geoscale(df, from_scale).reset_index(drop=True)
    relabeled = update_geoscale(filtered, to_scale)
    return aggregator(relabeled, groupbycols)
def aggregator(df, groupbycols, retain_zeros=True):
    """
    Aggregates flowbyactivity or flowbysector 'FlowAmount' column in df and
    generate weighted average values based on FlowAmount values for numeric
    columns
    :param df: df, Either flowbyactivity or flowbysector
    :param groupbycols: list, Either flowbyactivity or flowbysector columns
    :param retain_zeros, bool, default True, if set to True, all rows that
    have a FlowAmount = 0 will be returned in df. If False, those rows will
    be dropped
    :return: df, with aggregated columns
    """
    # reset index
    df = df.reset_index(drop=True)
    # tmp replace null values with empty cells
    df = replace_NoneType_with_empty_cells(df)
    # drop columns with flowamount = 0
    if retain_zeros is False:
        df = df[df['FlowAmount'] != 0]
    # list of column headers, that if exist in df, should be
    # aggregated using the weighted avg fxn
    possible_column_headers = \
        ('Spread', 'Min', 'Max', 'DataReliability', 'TemporalCorrelation',
         'GeographicalCorrelation', 'TechnologicalCorrelation',
         'DataCollection')
    # list of column headers that do exist in the df being aggregated
    column_headers = [e for e in possible_column_headers
                      if e in df.columns.values.tolist()]
    # weighted-average columns must not also be grouping keys
    groupbycols = [c for c in groupbycols if c not in column_headers]
    # note: agg with a list produces MultiIndex columns ('FlowAmount', 'sum'),
    # flattened by droplevel below
    df_dfg = df.groupby(groupbycols).agg({'FlowAmount': ['sum']})
    def is_identical(s):
        # True when every value in the Series equals the first value
        a = s.to_numpy()
        return (a[0] == a).all()
    # run through other columns creating weighted average
    for e in column_headers:
        # short-circuit: a constant column needs no weighted average
        if len(df) > 0 and is_identical(df[e]):
            df_dfg.loc[:, e] = df[e].iloc[0]
        else:
            df_dfg[e] = get_weighted_average(df, e, 'FlowAmount', groupbycols)
    df_dfg = df_dfg.reset_index()
    df_dfg.columns = df_dfg.columns.droplevel(level=1)
    # if datatypes are strings, ensure that Null values remain NoneType
    df_dfg = replace_strings_with_NoneType(df_dfg)
    return df_dfg
def sector_ratios(df, sectorcolumn):
    """
    Determine ratios of the less aggregated sectors within a
    more aggregated sector
    :param df: A df with sector columns
    :param sectorcolumn: 'SectorConsumedBy' or 'SectorProducedBy'
    :return: df, with 'FlowAmountRatio' column
    """
    # drop any null rows (can occur when activities are ranges)
    df = df[~df[sectorcolumn].isnull()]
    # find the longest length sector
    length = max(df[sectorcolumn].apply(lambda x: len(str(x))).unique())
    # for loop in reverse order longest length naics minus 1 to 2
    # appends missing naics levels to df
    sec_ratios = []
    for i in range(length, 3, -1):
        # subset df to sectors with length = i
        df_subset = subset_df_by_sector_lengths(df, [i])
        # create column for sector grouping
        df_subset = assign_sector_match_column(df_subset, sectorcolumn, i, i-1)
        # subset df to create denominator
        df_denom = df_subset[['FlowAmount', 'Location', 'sector_group']]
        # use the string 'sum' rather than the builtin sum(); passing the
        # builtin callable to .agg() is deprecated in pandas and slower
        df_denom = df_denom.groupby(['Location', 'sector_group'],
                                    as_index=False).agg({"FlowAmount": "sum"})
        df_denom = df_denom.rename(columns={"FlowAmount": "Denominator"})
        # merge the denominator column with fba_w_sector df
        ratio_df = df_subset.merge(df_denom, how='left')
        # calculate ratio of each sector's flow to its parent-group total
        ratio_df.loc[:, 'FlowAmountRatio'] = \
            ratio_df['FlowAmount'] / ratio_df['Denominator']
        ratio_df = ratio_df.drop(
            columns=['Denominator', 'sector_group'])
        sec_ratios.append(ratio_df)
    # concat list of dataframes (info on each page)
    df_w_ratios = pd.concat(sec_ratios, ignore_index=True)
    return df_w_ratios
def sector_aggregation(df_load):
    """
    Function that checks if a sector length exists, and if not,
    sums the less aggregated sector
    :param df_load: Either a flowbyactivity df with sectors or
    a flowbysector df
    :return: df, with aggregated sector values
    """
    # ensure None values are not strings
    df = replace_NoneType_with_empty_cells(df_load)
    # determine grouping columns - based on datatype
    group_cols = list(df.select_dtypes(include=['object', 'int']).columns)
    # determine if activities are sector-like, if aggregating a df with a
    # 'SourceName'
    sector_like_activities = check_activities_sector_like(df_load)
    # if activities are sector like, drop columns while running ag then
    # add back in
    if sector_like_activities:
        # subset df
        df_cols = [e for e in df.columns if e not in
                   ('ActivityProducedBy', 'ActivityConsumedBy')]
        group_cols = [e for e in group_cols if e not in
                      ('ActivityProducedBy', 'ActivityConsumedBy')]
        df = df[df_cols]
        df = df.reset_index(drop=True)
    # load naics length crosswwalk
    cw_load = load_crosswalk('sector_length')
    # find the longest length sector across both sector columns
    length = df[[fbs_activity_fields[0], fbs_activity_fields[1]]].apply(
        lambda x: x.str.len()).max().max()
    length = int(length)
    # for loop in reverse order longest length NAICS minus 1 to 2
    # appends missing naics levels to df
    for i in range(length, 2, -1):
        dfm = subset_and_merge_df_by_sector_lengths(df, i, i-1)
        # only keep values in left df, meaning there are no more
        # aggregated naics in the df
        dfm2 = dfm.query('_merge=="left_only"').drop(
            columns=['_merge', 'SPB_tmp', 'SCB_tmp'])
        sector_merge = 'NAICS_' + str(i)
        sector_add = 'NAICS_' + str(i - 1)
        # subset the crosswalk to the two relevant naics-length columns
        cw = cw_load[[sector_merge, sector_add]].drop_duplicates()
        # loop through and add additional naics
        sectype_list = ['Produced', 'Consumed']
        for s in sectype_list:
            # map each length-i sector to its length-(i-1) parent
            dfm2 = dfm2.merge(cw, how='left', left_on=[f'Sector{s}By'],
                              right_on=sector_merge)
            dfm2[f'Sector{s}By'] = dfm2[sector_add]
            dfm2 = dfm2.drop(columns=[sector_merge, sector_add])
        dfm2 = replace_NoneType_with_empty_cells(dfm2)
        # aggregate the new sector flow amounts
        if 'FlowAmount' in dfm2.columns:
            agg_sectors = aggregator(dfm2, group_cols)
        # if FlowName is not in column and instead aggregating for the
        # HelperFlow then simply sum helper flow column
        else:
            agg_sectors = dfm2.groupby(group_cols)['HelperFlow']\
                .sum().reset_index()
        # append to df
        agg_sectors = replace_NoneType_with_empty_cells(agg_sectors)
        df = pd.concat([df, agg_sectors], ignore_index=True).reset_index(
            drop=True)
    # if activities are source-like, set col values as
    # copies of the sector columns
    # (note: '&' here is a bitwise AND of three bools - equivalent to
    # 'and' for plain booleans)
    if sector_like_activities & ('FlowAmount' in df.columns) & \
            ('ActivityProducedBy' in df_load.columns):
        df = df.assign(ActivityProducedBy=df['SectorProducedBy'])
        df = df.assign(ActivityConsumedBy=df['SectorConsumedBy'])
    # replace null values
    df = replace_strings_with_NoneType(df).reset_index(drop=True)
    return df
def sector_disaggregation(df_load):
"""
function to disaggregate sectors if there is only one
naics at a lower level works for lower than naics 4
:param df_load: A FBS df, must have sector columns
:return: A FBS df with values for the missing naics5 and naics6
"""
# ensure None values are not strings
df = replace_NoneType_with_empty_cells(df_load)
# determine if activities are sector-like, if aggregating
# a df with a 'SourceName'
sector_like_activities = check_activities_sector_like(df_load)
# if activities are sector like, drop columns while running disag then
# add back in
if sector_like_activities:
df = df.drop(columns=['ActivityProducedBy', 'ActivityConsumedBy'],
errors='ignore')
df = df.reset_index(drop=True)
# load naics 2 to naics 6 crosswalk
cw_load = load_crosswalk('sector_length')
# appends missing naics levels to df
for i in range(2, 6):
dfm = subset_and_merge_df_by_sector_lengths(df, i, i + 1)
# only keep values in left column, meaning there are no less
# aggregated naics in the df
dfm2 = dfm.query('_merge=="left_only"').drop(
columns=['_merge', 'SPB_tmp', 'SCB_tmp'])
sector_merge = 'NAICS_' + str(i)
sector_add = 'NAICS_' + str(i + 1)
# subset the df by naics length
cw = cw_load[[sector_merge, sector_add]].drop_duplicates()
# only keep the rows where there is only one value in sector_add for
# a value in sector_merge
cw = cw.drop_duplicates(subset=[sector_merge], keep=False).reset_index(
drop=True)
# loop through and add additional naics
sectype_list = ['Produced', 'Consumed']
for s in sectype_list:
# inner join - only keep rows where there are | |
Lord Greatsword+4",
314405: "Great Lord Greatsword+5",
314500: "Great Lord Greatsword",
314501: "Great Lord Greatsword+1",
314502: "Great Lord Greatsword+2",
314503: "Great Lord Greatsword+3",
314504: "Great Lord Greatsword+4",
314505: "Great Lord Greatsword+5",
314600: "Great Lord Greatsword",
314601: "Great Lord Greatsword+1",
314602: "Great Lord Greatsword+2",
314603: "Great Lord Greatsword+3",
314604: "Great Lord Greatsword+4",
314605: "Great Lord Greatsword+5",
314700: "Great Lord Greatsword",
314701: "Great Lord Greatsword+1",
314702: "Great Lord Greatsword+2",
314703: "Great Lord Greatsword+3",
314704: "Great Lord Greatsword+4",
314705: "Great Lord Greatsword+5",
314800: "Great Lord Greatsword",
314801: "Great Lord Greatsword+1",
314802: "Great Lord Greatsword+2",
314803: "Great Lord Greatsword+3",
314804: "Great Lord Greatsword+4",
314805: "Great Lord Greatsword+5",
314900: "Great Lord Greatsword",
314901: "Great Lord Greatsword+1",
314902: "Great Lord Greatsword+2",
314903: "Great Lord Greatsword+3",
314904: "Great Lord Greatsword+4",
314905: "Great Lord Greatsword+5",
315000: "Great Lord Greatsword",
315001: "Great Lord Greatsword+1",
315002: "Great Lord Greatsword+2",
315003: "Great Lord Greatsword+3",
315004: "Great Lord Greatsword+4",
315005: "Great Lord Greatsword+5",
315100: "Great Lord Greatsword",
315101: "Great Lord Greatsword+1",
315102: "Great Lord Greatsword+2",
315103: "Great Lord Greatsword+3",
315104: "Great Lord Greatsword+4",
315105: "Great Lord Greatsword+5",
315200: "Great Lord Greatsword",
315201: "Great Lord Greatsword+1",
315202: "Great Lord Greatsword+2",
315203: "Great Lord Greatsword+3",
315204: "Great Lord Greatsword+4",
315205: "Great Lord Greatsword+5",
315300: "Great Lord Greatsword",
315301: "Great Lord Greatsword+1",
315302: "Great Lord Greatsword+2",
315303: "Great Lord Greatsword+3",
315304: "Great Lord Greatsword+4",
315305: "Great Lord Greatsword+5",
315400: "Great Lord Greatsword",
315401: "Great Lord Greatsword+1",
315402: "Great Lord Greatsword+2",
315403: "Great Lord Greatsword+3",
315404: "Great Lord Greatsword+4",
315405: "Great Lord Greatsword+5",
315500: "Great Lord Greatsword",
315501: "Great Lord Greatsword+1",
315502: "Great Lord Greatsword+2",
315503: "Great Lord Greatsword+3",
315504: "Great Lord Greatsword+4",
315505: "Great Lord Greatsword+5",
315600: "Great Lord Greatsword",
315601: "Great Lord Greatsword+1",
315602: "Great Lord Greatsword+2",
315603: "Great Lord Greatsword+3",
315604: "Great Lord Greatsword+4",
315605: "Great Lord Greatsword+5",
315700: "Great Lord Greatsword",
315701: "Great Lord Greatsword+1",
315702: "Great Lord Greatsword+2",
315703: "Great Lord Greatsword+3",
315704: "Great Lord Greatsword+4",
315705: "Great Lord Greatsword+5",
350000: "Zweihander",
350001: "Zweihander+1",
350002: "Zweihander+2",
350003: "Zweihander+3",
350004: "Zweihander+4",
350005: "Zweihander+5",
350006: "Zweihander+6",
350007: "Zweihander+7",
350008: "Zweihander+8",
350009: "Zweihander+9",
350010: "Zweihander+10",
350011: "Zweihander+11",
350012: "Zweihander+12",
350013: "Zweihander+13",
350014: "Zweihander+14",
350015: "Zweihander+15",
350100: "Crystal Zweihander",
350101: "Crystal Zweihander+1",
350102: "Crystal Zweihander+2",
350103: "Crystal Zweihander+3",
350104: "Crystal Zweihander+4",
350105: "Crystal Zweihander+5",
350200: "Lightning Zweihander",
350201: "Lightning Zweihander+1",
350202: "Lightning Zweihander+2",
350203: "Lightning Zweihander+3",
350204: "Lightning Zweihander+4",
350205: "Lightning Zweihander+5",
350300: "Raw Zweihander",
350301: "Raw Zweihander+1",
350302: "Raw Zweihander+2",
350303: "Raw Zweihander+3",
350304: "Raw Zweihander+4",
350305: "Raw Zweihander+5",
350400: "Magic Zweihander",
350401: "Magic Zweihander+1",
350402: "Magic Zweihander+2",
350403: "Magic Zweihander+3",
350404: "Magic Zweihander+4",
350405: "Magic Zweihander+5",
350406: "Magic Zweihander+6",
350407: "Magic Zweihander+7",
350408: "Magic Zweihander+8",
350409: "Magic Zweihander+9",
350410: "Magic Zweihander+10",
350500: "Enchanted Zweihander",
350501: "Enchanted Zweihander+1",
350502: "Enchanted Zweihander+2",
350503: "Enchanted Zweihander+3",
350504: "Enchanted Zweihander+4",
350505: "Enchanted Zweihander+5",
350600: "Divine Zweihander",
350601: "Divine Zweihander+1",
350602: "Divine Zweihander+2",
350603: "Divine Zweihander+3",
350604: "Divine Zweihander+4",
350605: "Divine Zweihander+5",
350606: "Divine Zweihander+6",
350607: "Divine Zweihander+7",
350608: "Divine Zweihander+8",
350609: "Divine Zweihander+9",
350610: "Divine Zweihander+10",
350700: "Occult Zweihander",
350701: "Occult Zweihander+1",
350702: "Occult Zweihander+2",
350703: "Occult Zweihander+3",
350704: "Occult Zweihander+4",
350705: "Occult Zweihander+5",
350800: "Fire Zweihander",
350801: "Fire Zweihander+1",
350802: "Fire Zweihander+2",
350803: "Fire Zweihander+3",
350804: "Fire Zweihander+4",
350805: "Fire Zweihander+5",
350806: "Fire Zweihander+6",
350807: "Fire Zweihander+7",
350808: "Fire Zweihander+8",
350809: "Fire Zweihander+9",
350810: "Fire Zweihander+10",
350900: "Chaos Zweihander",
350901: "Chaos Zweihander+1",
350902: "Chaos Zweihander+2",
350903: "Chaos Zweihander+3",
350904: "Chaos Zweihander+4",
350905: "Chaos Zweihander+5",
351000: "Greatsword",
351001: "Greatsword+1",
351002: "Greatsword+2",
351003: "Greatsword+3",
351004: "Greatsword+4",
351005: "Greatsword+5",
351006: "Greatsword+6",
351007: "Greatsword+7",
351008: "Greatsword+8",
351009: "Greatsword+9",
351010: "Greatsword+10",
351011: "Greatsword+11",
351012: "Greatsword+12",
351013: "Greatsword+13",
351014: "Greatsword+14",
351015: "Greatsword+15",
351100: "Crystal Greatsword",
351101: "Crystal Greatsword+1",
351102: "Crystal Greatsword+2",
351103: "Crystal Greatsword+3",
351104: "Crystal Greatsword+4",
351105: "Crystal Greatsword+5",
351200: "Lightning Greatsword",
351201: "Lightning Greatsword+1",
351202: "Lightning Greatsword+2",
351203: "Lightning Greatsword+3",
351204: "Lightning Greatsword+4",
351205: "Lightning Greatsword+5",
351300: "Raw Greatsword",
351301: "Raw Greatsword+1",
351302: "Raw Greatsword+2",
351303: "Raw Greatsword+3",
351304: "Raw Greatsword+4",
351305: "Raw Greatsword+5",
351400: "Magic Greatsword",
351401: "Magic Greatsword+1",
351402: "Magic Greatsword+2",
351403: "Magic Greatsword+3",
351404: "Magic Greatsword+4",
351405: "Magic Greatsword+5",
351406: "Magic Greatsword+6",
351407: "Magic Greatsword+7",
351408: "Magic Greatsword+8",
351409: "Magic Greatsword+9",
351410: "Magic Greatsword+10",
351500: "Enchanted Greatsword",
351501: "Enchanted Greatsword+1",
351502: "Enchanted Greatsword+2",
351503: "Enchanted Greatsword+3",
351504: "Enchanted Greatsword+4",
351505: "Enchanted Greatsword+5",
351600: "Divine Greatsword",
351601: "Divine Greatsword+1",
351602: "Divine Greatsword+2",
351603: "Divine Greatsword+3",
351604: "Divine Greatsword+4",
351605: "Divine Greatsword+5",
351606: "Divine Greatsword+6",
351607: "Divine Greatsword+7",
351608: "Divine Greatsword+8",
351609: "Divine Greatsword+9",
351610: "Divine Greatsword+10",
351700: "Occult Greatsword",
351701: "Occult Greatsword+1",
351702: "Occult Greatsword+2",
351703: "Occult Greatsword+3",
351704: "Occult Greatsword+4",
351705: "Occult Greatsword+5",
351800: "Fire Greatsword",
351801: "Fire Greatsword+1",
351802: "Fire Greatsword+2",
351803: "Fire Greatsword+3",
351804: "Fire Greatsword+4",
351805: "Fire Greatsword+5",
351806: "Fire Greatsword+6",
351807: "Fire Greatsword+7",
351808: "Fire Greatsword+8",
351809: "Fire Greatsword+9",
351810: "Fire Greatsword+10",
351900: "Chaos Greatsword",
351901: "Chaos Greatsword+1",
351902: "Chaos Greatsword+2",
351903: "Chaos Greatsword+3",
351904: "Chaos Greatsword+4",
351905: "Chaos Greatsword+5",
352000: "Demon Great Machete",
352001: "Demon Great Machete+1",
352002: "Demon Great Machete+2",
352003: "Demon Great Machete+3",
352004: "Demon Great Machete+4",
352005: "Demon Great Machete+5",
352006: "Demon Great Machete+6",
352007: "Demon Great Machete+7",
352008: "Demon Great Machete+8",
352009: "Demon Great Machete+9",
352010: "Demon Great Machete+10",
352011: "Demon Great Machete+11",
352012: "Demon Great Machete+12",
352013: "Demon Great Machete+13",
352014: "Demon Great Machete+14",
352015: "Demon Great Machete+15",
352100: "Crystal Demon Great Machete",
352101: "Crys. Demon Great Machete+1",
352102: "Crys. Demon Great Machete+2",
352103: "Crys. Demon Great Machete+3",
352104: "Crys. Demon Great Machete+4",
352105: "Crys. Demon Great Machete+5",
352200: "Ltng. Demon Great Machete",
352201: "Ltng. Demon Great Machete+1",
352202: "Ltng. Demon Great Machete+2",
352203: "Ltng. Demon Great Machete+3",
352204: "Ltng. Demon Great Machete+4",
352205: "Ltng. Demon Great Machete+5",
352300: "Raw Demon Great Machete",
352301: "Raw Demon Great Machete+1",
352302: "Raw Demon Great Machete+2",
352303: "Raw Demon Great Machete+3",
352304: "Raw Demon Great Machete+4",
352305: "Raw Demon Great Machete+5",
352400: "Magic Demon Great Machete",
352401: "Magic Demon Great Machete+1",
352402: "Magic Demon Great Machete+2",
352403: "Magic Demon Great Machete+3",
352404: "Magic Demon Great Machete+4",
352405: "Magic Demon Great Machete+5",
352406: "Magic Demon Great Machete+6",
352407: "Magic Demon Great | |
'''
Created on 27 févr. 2022
@author: slinux
'''
import threading
import logging
import wx
import time
from wxRavenGUI.application.wxcustom import *
import secrets
import math
from multiprocessing import Process
from threading import Thread, Lock
import ctypes
#
#
# Plugin Job is a class for WxRaven JobManager that execute background task to improve userexperience
#
class Job(object):
    '''
    Base class for background tasks executed by the wxRaven JobManager.

    A Job tracks its own lifecycle (status, progress, timing, errors),
    may declare dependencies on other jobs, and can export/restore its
    state as JSON for remote (RPC) execution. Subclasses implement the
    actual work in JobProcess() (defined outside this excerpt).
    '''
    # --- identity ---
    jobName = 'UnknownJob'
    jobId = 'UnknownJobId'
    # --- lifecycle / progress state ---
    _jobRunning = False
    _jobDone = False
    _jobProgressPercent = 0
    _jobDetailedProgress = ''
    _jobDetailedProgress_max = 0
    _jobDetailedProgress_cur = 0
    _jobResult = None
    _jobError = None
    _jobStatus = 'New'
    _jobNetwork = None
    #_jobSize = 0
    _jobSaved = False
    _run_safe = True
    _notify_at_end = True
    # result expiry in seconds; -1 = never expires
    _jobResultExpire = -1
    # --- timing ---
    _jobStartTime = 0
    _jobStopTime = 0
    _jobElapsedTime = 0
    _jobMaxRunningTime = 1200 #20Minutes for the standard job
    _jobDelayBefore = 0
    _jobDelayAfter = 0
    _jobNumber = 0
    # NOTE(review): mutable class-level list shared by all instances that
    # do not reassign it (__init__ does not) — confirm intended
    _jobOtherJobRequirements = []
    _jobDirectCallBack = None
    _daemonize_job = True
    # NOTE(review): also a mutable class-level list; __init__ reassigns it
    _export_params_list= []
    _jobUniqueId = None
    # --- remote execution / RPC ---
    _JobAllowRemoteExecution = False
    _jobNetworkCompatibility = ['RPC']
    _jobReusable = True
    _jobFromRemote = False
    # --- payment standby fields (RVN transactions) ---
    _jobTxStandbyDescription = None
    _jobPaymentStandby = None
    _jobPaymentAmount = None
    _jobTxStandby = None
    _jobPaymentStatus = None
    _useMultiProcess = False
def __init__(self ,parentFrame, plugin=None, viewCallback=None, safeMode=True, notifyAtEnd=True):
'''
Constructor
'''
self._run_safe= safeMode
self.logger = logging.getLogger('wxRaven')
self.parentFrame = parentFrame
self.plugin = plugin
self.source = 'Unknown'
if plugin !=None:
self.source = plugin.PLUGIN_NAME
self.jobName = 'UnknownJob'
self.jobId = 'UnknownJob'
_newToken = secrets.token_urlsafe(16)
self._jobUniqueId = _newToken
self._jobNetwork = None
self._jobDirectCallBack = viewCallback
self._jobDetailedProgress = "No manager assigned"
self._notify_at_end = notifyAtEnd
self._export_params_list=[]
self.addExportParam('jobName')
self._jobNetworkCompatibility = ['RPC']
self.jobProcessInstance=None
self._lock = Lock()
#
# Jsonify for Import Export through RPC
#
    def ExportRemoteJobResultJson(self):
        """Return the raw result payload for transmission to a remote caller."""
        return self._jobResult
def ExportRemoteJobStatusJson(self, _withResult=False):
self.__refreshProgessDatas__()
_jsonData = {
'jobName':self.jobName,
'_jobStatus':self._jobStatus,
'_jobRunning':self._jobRunning,
'_jobDone':self._jobDone,
'_jobMaxRunningTime':self._jobMaxRunningTime,
'_jobProgressPercent':self._jobProgressPercent,
'_jobDetailedProgress':self._jobDetailedProgress,
'_jobDetailedProgress_max':self._jobDetailedProgress_max,
'_jobDetailedProgress_cur':self._jobDetailedProgress_cur,
'_jobStartTime': self._jobStartTime,
'_jobElapsedTime': self._jobElapsedTime,
'_jobStopTime':self._jobStopTime,
'_jobTxStandby': self._jobTxStandby,
'_jobPaymentStandby': self._jobPaymentStandby,
'_jobTxStandbyDescription': self._jobTxStandbyDescription,
'_jobPaymentAmount': self._jobPaymentAmount,
'_jobPaymentStatus': self._jobPaymentStatus,
}
if self._jobError == None:
_jsonData['_jobError'] = None
else:
_jsonData['_jobError'] = str(self._jobError)
if _withResult:
_jsonData['_jobResult'] = self._jobResult
else:
_jsonData['_jobResult'] = None
return _jsonData
def ExportRemoteParametersJson(self):
_jsonData = {}
for _key in self._export_params_list:
try:
self.logger.info(f'{self.jobName} : exporting param {_key} ')
_jsonData[_key] = getattr(self, _key)
except Exception as e:
self.logger.error(f'Unable to export {_key} in {self.jobName} : {e} ')
return _jsonData
def RestoreParameters(self,_jsonData ):
#do not allow this setting to change
_sever_exceptions = ['_jobNetwork']
for _k in _jsonData:
if _k in _sever_exceptions:
self.logger.warning(f'Invalid or Not authorized Job parameter : {_k}')
continue
self.logger.info(f'setting param {_k} ')
try:
setattr(self, _k, _jsonData[_k])
except Exception as e:
self.logger.error(f'Unable to RestoreParameters {_k} ')
self.jobId = f"{self.jobName} - {self.getNetworkName()}"
return True
#
#
#
# Jobs CORE
#
#
#
    def DoJob(self):
        """Start the job asynchronously: run __DoJob_T__ on a new thread
        (daemonized per _daemonize_job) and keep it in jobProcessInstance."""
        t=threading.Thread(target=self.__DoJob_T__, args=(), daemon=self._daemonize_job)
        t.start()
        self.jobProcessInstance = t
    def __refreshProgessDatas__(self):
        """Recompute _jobElapsedTime and _jobProgressPercent under the
        instance lock.

        Arithmetic failures (notably a zero _jobDetailedProgress_max) are
        deliberately swallowed so a progress refresh can never crash a job.
        """
        self._lock.acquire()
        if self._jobRunning:
            try:
                self._jobElapsedTime = float(time.time() - self._jobStartTime).__round__(2)
            except Exception as e:
                pass
        '''
        if self._jobResult != None:
            self._jobSize = self.__convert_size__(sys.getsizeof(str(self._jobResult)))
        '''
        try:
            _max = self._jobDetailedProgress_max
            _cur = self._jobDetailedProgress_cur
            # ZeroDivisionError here (max == 0) leaves the previous percent
            self._jobProgressPercent = float(( _cur/_max)*100).__round__(2)
        except Exception as e:
            pass
        self._lock.release()
def __waitJobRequirements__(self):
_allDone = False
self.setProgress(f'Waiting Requirement Jobs : {self._jobOtherJobRequirements}')
while not _allDone:
self._jobStatus='Waiting'
_allDone = True
for _jNum in self._jobOtherJobRequirements:
_j = _jNum
if not _j._jobRunning and _j._jobDone:
_allDone = _allDone and True
if not _allDone:
time.sleep(5)
    def __DoJob_T__(self, evt=None):
        """Worker-thread entry point: run the job lifecycle end to end.

        Sequence: guard against re-entry -> wait for requirement jobs ->
        optional pre-delay -> JobProcess() (wrapped in try/except when
        _run_safe) -> optional post-delay -> save result and fire callbacks
        on success -> emit a wx notification -> clear the running flag.

        :param evt: unused; present for event-handler style invocation
        """
        #self.jobId = f"{self.jobName} - {self.getNetworkName()}"
        #self.logger.info(f'JOB : {self.jobId}')
        # re-entry guard: a job instance runs at most once at a time
        if self._jobRunning :
            return
        self._jobRunning = True
        self._jobStatus='Running'
        if len(self._jobOtherJobRequirements) > 0 :
            self.__waitJobRequirements__()
            self._jobStatus='Running'
        self._jobStartTime = time.time()
        #
        #Delay Before to manage spam request
        if self._jobDelayBefore > 0:
            time.sleep(self._jobDelayBefore)
        #
        # Process
        #
        if not self._run_safe:
            # unsafe mode: let exceptions propagate to the thread
            self.JobProcess()
        else:
            try:
                _jobRunningTimeout = self._jobMaxRunningTime
                #self.setProgress(f'Start Job in same thread')
                self.logger.info(f'{self.jobName} started in the same thread.' )
                self.JobProcess()
                '''
                else:
                    self.logger.error(f'MULTIPROCESSING NOT IMPLEMENTED' )
                    self.logger.info(f'{self.jobName} started in a new process for timeout management, no progress report available.' )
                    jobProcessInstance = Process(target=self.JobProcess, name=f'JobProcess : {self.jobName}')
                    jobProcessInstance.start()
                    self.jobProcessInstance = jobProcessInstance
                    self.setProgress(f'Process Running...')
                    jobProcessInstance.join(timeout=_jobRunningTimeout)
                    if jobProcessInstance.exitcode != 0:
                        jobProcessInstance.terminate()
                        self.logger.error(f'JOB ERROR : Job Running Time exceeded')
                        self._jobError = f'JOB ERROR : Job Running Time exceeded'
                        self._jobStatus='error'
                        self.setProgress(f'JOB ERROR : Job Running Time exceeded')
                        #raise('JOB ERROR : Job Running Time exceeded')
                '''
            except Exception as e:
                # safe mode: record the failure instead of killing the thread
                self.logger.exception(f'JOB ERROR : {e}')
                self._jobError = f'JOB ERROR : {e}'
                self._jobStatus='error'
                self.setProgress(f'JOB ERROR : {e}')
        #
        #Delay After to manage ?
        if self._jobDelayAfter > 0 and self._jobError == None:
            time.sleep(self._jobDelayAfter)
        # success path only: persist result, mark done, fire callbacks
        if self._jobError == None:
            if not self._jobSaved:
                self.SaveResult()
            self._jobDone=True
            self._jobStatus='Done'
            self.ExecuteCallbacks()
        #
        #Notification if activated or error
        #
        self._jobStopTime = time.time()
        self._jobElapsedTime = float(self._jobStopTime - self._jobStartTime).__round__(2)
        if self._notify_at_end or self._jobError != None:
            _type = 'success'
            _t = f"Job Done ! "
            _m = f"{self.jobName}"
            if self._jobError != None:
                _t = f"{self.jobName}"
                _m = f"{self._jobError}"
                _type = 'error'
            #UserSystemNotification(self.parentFrame, title=_t, message=_m, _type=_type)
            # UI work must run on the wx main thread
            wx.CallAfter(UserSystemNotification,self.parentFrame, title=_t, message=_m, _type=_type )
        if self._jobError == None:
            self.setProgress(f"Job Complete ({self._jobElapsedTime} seconds)")
        self._jobRunning = False
    def __RemoteProtection__(self):
        """Guard helper: when the job originated from a remote relay, flag
        it as not allowed and return True (callers should then abort).

        NOTE(review): references self._initalJobRequest, which is not set
        anywhere in this class — presumably set by subclasses; confirm
        before relying on this path.
        """
        if self._jobFromRemote:
            _SafeGuardMessage = f"The requested job(s) is not allowed on the remote relay : {self._initalJobRequest}"
            self.setProgress(f'{_SafeGuardMessage}')
            self.setError(_SafeGuardMessage)
        return self._jobFromRemote
    def __GetThreadId__(self):
        """Best-effort lookup of the worker thread's id, used by __KillJob__
        to raise SystemExit inside it.

        Returns None implicitly when no worker thread exists or it cannot
        be found. NOTE(review): relies on the private threading._active map,
        which is a CPython implementation detail.
        """
        if self.jobProcessInstance != None:
            # returns id of the respective thread
            if hasattr(self.jobProcessInstance, '_thread_id'):
                return self.jobProcessInstance._thread_id
            for id, thread in threading._active.items():
                if thread is self.jobProcessInstance:
                    return id
    def __KillJob__(self, reason="Job Killed (no reason)."):
        """Forcibly stop the worker thread by asynchronously raising
        SystemExit in it via the CPython C-API.

        :param reason: text stored as progress/error message
        :return: True when a kill was attempted, False when no thread exists
        """
        if self.jobProcessInstance != None:
            #self.jobProcessInstance.terminate()
            self._lock.acquire()
            #self.jobProcessInstance._set_tstate_lock()
            #self.jobProcessInstance._stop()
            self._jobStatus='Error'
            self.setProgress(f'{reason}')
            self.setError(f'{reason}')
            #f"Job Manager : {j.jobName} as exceeded the maximum running time ({j._jobMaxRunningTime} seconds), killing thread..."
            #self.setProgress(f'Running Time exceeded ({self._jobMaxRunningTime} seconds), Job aborded.')
            thread_id = self.__GetThreadId__()
            res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,
                  ctypes.py_object(SystemExit))
            # res > 1 means more than one thread state was affected:
            # undo the async exception to avoid corrupting other threads
            if res > 1:
                ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
                #print('Exception raise failure')
            self._jobRunning = False
            self._lock.release()
            return True
        return False
    def __MainThreadCall__(self, function, *args):
        """Schedule *function(*args)* on the wx main/UI thread via
        wx.CallAfter, logging (not raising) any scheduling failure."""
        try:
            wx.CallAfter(function, *args)
        except Exception as e:
            self.logger.exception(f'JOB ERROR __MainThreadCall__ : {e}')
def __convert_size__(self,size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
"""
'_jobTxStandby': self._jobTxStandby,
'_jobPaymentStandby': self._jobPaymentStandby,
'_jobTxStandbyDescription': self._jobTxStandbyDescription,
"""
    def setPaymentStandby(self, PaymentDatas, PaymentDescription, PaymentAmount='1 RVN', PaymentTx=None, PaymentStatus=''):
        """Put the job into payment-standby: record the payment request data
        exposed to the client until the transaction completes."""
        self._jobTxStandby = PaymentTx
        self._jobPaymentStandby = PaymentDatas
        self._jobTxStandbyDescription = PaymentDescription
        self._jobPaymentAmount = PaymentAmount
        self._jobPaymentStatus = PaymentStatus
    def setPaymentDone(self, PaymentTxId=None):
        """Mark the standby payment as completed, clearing the pending amount."""
        self._jobTxStandby = PaymentTxId
        self._jobPaymentStandby = '-'
        #self._jobTxStandbyDescription = PaymentDescription
        self._jobPaymentAmount = '0 RVN'
        self._jobPaymentStatus = 'Transaction Complete'
    def setReusable(self, Reusable=True):
        """Mark whether this job's result may be reused for identical requests."""
        self._jobReusable = Reusable
def removeNetworkCompatibility(self, val):
if self._jobNetworkCompatibility.__contains__(val):
self._jobNetworkCompatibility.remove(val)
    def addNetworkCompatibility(self, val):
        """Append a transport name (e.g. 'WS-RPC') to the compatibility list."""
        self._jobNetworkCompatibility.append(val)
    def setAllowRemoteExecution(self, val):
        """Allow/deny remote execution of this job.

        NOTE(review): 'WS-RPC' is registered as compatible even when *val*
        is False — confirm that is intended."""
        self.addNetworkCompatibility('WS-RPC')
        self._JobAllowRemoteExecution = val
    def __setJobNumber__(self, jobNum):
        """Record the ordinal number assigned to this job."""
        self._jobNumber = jobNum
def addJobRequirement(self, jobObj):
self._jobOtherJobRequirements.append(jobObj)
    def setMaxRunningTime(self, seconds):
        """Set the running-time budget in seconds (see __checkRunningTimeout__)."""
        self._jobMaxRunningTime = seconds
    def __checkRunningTimeout__(self):
        """Return True when the job has run longer than _jobMaxRunningTime
        (a value of 0 or less disables the check)."""
        _timeout=False
        if self._jobMaxRunningTime > 0:
            # refresh _jobElapsedTime before comparing
            self.__refreshProgessDatas__()
            if self._jobElapsedTime > self._jobMaxRunningTime :
                _timeout=True
        return _timeout
#
# Special function to handle network on tasks
#
def getNetworkName(self):
_returnNetwork = self._jobNetwork
if self._jobNetwork == None:
_returnNetwork = self.parentFrame.getNetworkName()
return self._jobNetwork
    def setNetwork(self, val=None):
        """Bind this job to a named network and refresh the composite jobId."""
        self._jobNetwork = val
        self.jobId = f"{self.jobName} - {self._jobNetwork}"
    def getNetworkRPC(self):
        """Return the RPC connection for this job's network via the parent frame."""
        return self.parentFrame.getNetwork(self.getNetworkName())
    def getNetworkRavencoin(self):
        """Return the Ravencoin interface for this job's network via the parent frame."""
        return self.parentFrame.getRavencoin(self.getNetworkName())
def setFromRemote(self, val):
self._jobFromRemote = True
    def setDelays(self, before=0, after=0):
        """Set sleep delays in seconds applied before/after JobProcess runs
        (used to throttle spammy requests)."""
        self._jobDelayAfter = after
        self._jobDelayBefore = before
    def addExportParam(self, paramname):
        """Whitelist an attribute name for ExportRemoteParametersJson()."""
        self._export_params_list.append(paramname)
    def setNotification(self, enabl=True):
        """Enable/disable the end-of-job desktop notification."""
        self._notify_at_end = enabl
    def setExpiration(self, seconds=-1):
        """Set result expiry in seconds (-1 means the result never expires)."""
        self._jobResultExpire = seconds
    def setMax(self,max):
        """Set the denominator used for percent-progress computation.
        NOTE(review): the parameter name shadows the builtin max()."""
        self._jobDetailedProgress_max=max
| |
<filename>code/mqsubmit.py
#!/usr/bin/python
"""
mqsubmit.py: submits a maxquant job to the cloud based automation pipeline
"""
import os
import optparse
import random
import re
import sys
import time
import boto3
import botocore
import xml.etree.ElementTree as ET
def adjustConfig(mqconfig, mqdir, mqparams):
    """
    Takes the MaxQuant GUI generated XML configuration file and updates the data and fasta file paths so they
    are changed from where they where when created, to where they are going to be on the Cloud server. It also
    sets the number of threads that will be used on the cloud server to run the job.
    It returns a list of the datafiles and a list of the fasta files for the purpose of the S3 uploads

    :param mqconfig: path to the mqpar.xml file (rewritten in place)
    :param mqdir: directory prefix the files will live under on the job server
    :param mqparams: job parameter dict (forwarded to pickInstanceType)
    :return: (datafiles, fastas) lists of bare file names
    """
    tree = ET.parse(mqconfig)
    root = tree.getroot()
    # Get list of datafiles (mzXML and RAW) and fix the file paths
    datafiles = []
    for filePaths in root.findall('filePaths'):
        files = filePaths.findall('string')
        for d in files:
            # keep only the bare name from the original Windows path
            dfile = (d.text).split('\\')[-1]
            datafiles.append(dfile)
            dpath = mqdir + dfile
            d.text = dpath
    # Get list of fasta files and fix the file paths
    fastas = []
    for fastaFiles in root.findall('fastaFiles'):
        fasta = fastaFiles.findall('string')
        # Starting in MaxQuant version 1.6.10.43 (or at least noticed then),
        # the xml file contains a different structure for describing the fasta file(s).
        # In previous versions it's like:
        #  <fastaFiles>
        #     <string>c:\mq-job\UP000005640_9606_human.fasta</string>
        #  </fastaFiles>
        # Now it's like this:
        #  <fastaFiles>
        #     <FastaFileInfo>
        #        <fastaFilePath>C:\mq-job\yeast_orf_trans_all_05-Jan-2010.fasta</fastaFilePath>
        #        <identifierParseRule>>([^\s]*)</identifierParseRule>
        #        <descriptionParseRule>>(.*)</descriptionParseRule>
        #        <taxonomyParseRule></taxonomyParseRule>
        #        <variationParseRule></variationParseRule>
        #        <modificationParseRule></modificationParseRule>
        #        <taxonomyId></taxonomyId>
        #     </FastaFileInfo>
        #  </fastaFiles>
        # Not sure if we care about anything except `fastaFilePath`.
        if not fasta:  # old format wasn't found, look for new
            tmp = fastaFiles.findall('FastaFileInfo')
            for i in tmp:
                fasta = i.findall("fastaFilePath")
        for f in fasta:
            ffile = (f.text).split('\\')[-1]
            fastas.append(ffile)
            fpath = mqdir + ffile
            f.text = fpath
    # how many threads should the job use
    # (filePaths is the last element of the loop above; its child count
    # equals the number of data files)
    threads = pickInstanceType(filePaths, mqparams)[1]
    cthreads = root.find('numThreads')
    cthreads.text = threads
    # re-write the updated configuration with the updated path and thread changes
    tree.write(mqconfig)
    # MaxQuant is a Windows program after all
    # NOTE(review): mqconfig is interpolated into a shell command line —
    # fine for trusted local paths, unsafe for untrusted input
    os.popen("/usr/bin/unix2dos %s >> /dev/null 2>&1" % mqconfig)
    return datafiles, fastas
def pickInstanceType(fileList, mqparams):
    """
    Determine which type of EC2 instance should be used and how many threads to use
    based on the number of datafiles the job has.

    :param fileList: sequence of data files (only its length is used)
    :param mqparams: job parameter dict (unused; kept for interface
        compatibility with existing callers)
    :return: (instanceType, threads) where threads is a string
    """
    fileCount = len(fileList)
    # (max files, instance type) tiers, smallest first; the thread count
    # tracks the file count but is capped at 36 (c4.8xlarge vCPUs).
    # Replaces a repetitive if/elif ladder whose final branch was a
    # redundant 'elif fileCount >= 36' instead of 'else'.
    tiers = (
        (2, "c4.large"),
        (4, "c4.xlarge"),
        (8, "c4.2xlarge"),
        (16, "c4.4xlarge"),
        (36, "c4.8xlarge"),
    )
    for maxFiles, instanceType in tiers:
        if fileCount <= maxFiles:
            return instanceType, str(fileCount)
    # more than 36 files: biggest instance, cap the threads
    return "c4.8xlarge", "36"
def getDataSize(datafiles):
    """
    Determine the total size of the data files in this job. This information is used to
    calculate the size of the EBS volume attached to the job server.

    :param datafiles: iterable of file paths; entries that are not regular
        files are skipped
    :return: combined size in decimal gigabytes, rounded to the nearest int
    """
    total_bytes = sum(os.path.getsize(f) for f in datafiles if os.path.isfile(f))
    return int(round(total_bytes / 1000 / 1000 / 1000))
def passwordGen(plength):
    """
    Generate a random string suitable for use as a password. This is used later to generate a password for the
    local Administrator account on the job server.

    :param plength: desired password length
    :return: password string drawn from [0-9a-zA-Z!]
    """
    # SECURITY FIX: use the cryptographically secure 'secrets' RNG instead
    # of 'random' — this value becomes a real Administrator password
    import secrets
    chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!'
    return ''.join(secrets.choice(chars) for _ in range(plength))
def checkJobAlreadyExists(mqBucket, jobFolder):
    """
    Check to see if the job already exists to avoid overwritting it

    :param mqBucket: S3 bucket name holding maxquant jobs
    :param jobFolder: job's key prefix within the bucket
    :return: True when <jobFolder>/mqpar.xml already exists in the bucket
    """
    s3 = boto3.resource('s3', 'us-west-2')
    exists = False
    try:
        # HEAD the job's config object; 404 means the job folder is free
        s3.Object(mqBucket, "{0}/mqpar.xml".format(jobFolder)).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            exists = False
        else:
            # any other error (permissions, throttling) is unexpected
            raise e
    else:
        exists = True
    return exists
def uploadS3(mqBucket, jobFolder, mqparams, mqconfig):
    """
    Upload everything the job needs (data files, FASTA files, the MaxQuant
    configuration, and optional custom databases/modifications XML) into the
    job's folder in the S3 bucket, then write the control objects that mark
    the job as ready to run.
    """
    client = boto3.client('s3', 'us-west-2')
    transfer = boto3.s3.transfer.S3Transfer(client)

    def _put(localName):
        # Upload one local file into the job folder, echoing progress.
        sys.stdout.write("\tUploading: {0}...".format(localName))
        transfer.upload_file(localName, mqBucket, "{0}/{1}".format(jobFolder, localName))
        print(" Done!")

    print("\nUploading data file(s)...")
    for name in mqparams['mzxmlFiles']:
        _put(name)
    print("\nUploading FASTA file(s)...")
    for name in mqparams['fastaFiles']:
        _put(name)
    sys.stdout.write("\nUploading configuration file...")
    # The config is always stored under the canonical name mqpar.xml.
    transfer.upload_file(mqconfig, mqBucket, "{0}/{1}".format(jobFolder, "mqpar.xml"))
    print(" Done!")
    # Optional per-job override for the MaxQuant databases definition.
    if 'database' in mqparams:
        sys.stdout.write("\nUploading custom databases.xml file...")
        transfer.upload_file(mqparams['database'], mqBucket, "{0}/{1}".format(jobFolder, mqparams['database']))
        print(" Done!")
    # Optional per-job override for the MaxQuant modifications definition.
    if 'modifications' in mqparams:
        sys.stdout.write("\nUploading custom modifications.xml file...")
        transfer.upload_file(mqparams['modifications'], mqBucket, "{0}/{1}".format(jobFolder, mqparams['modifications']))
        print(" Done!")
    sys.stdout.write("\nSetting Job Ready Flag...")
    # Job metadata object (name, department, contact) for server-side tooling.
    client.put_object(Body="{0},{1},{2}".format(mqparams['jobName'], mqparams['department'], mqparams['contactEmail']), Bucket = mqBucket, Key="{0}/jobCtrl/jobinfo.txt".format(jobFolder))
    # The presence of ready.txt signals that the job may start.
    client.put_object(Body="ready", Bucket = mqBucket, Key="{0}/jobCtrl/ready.txt".format(jobFolder))
    # Pre-sign a URL to the (not yet created) results bundle and stash it with the job.
    resultsUrl = genTempUrl(mqBucket, jobFolder).strip()
    client.put_object(Body = resultsUrl, Bucket = mqBucket, Key="{0}/jobCtrl/resultsUrl.txt".format(jobFolder))
    print(" Done!")
def startWorker(mqBucket, mqparams, UserDataScript):
    """
    Launch the EC2 job server; the user-data script installs MaxQuant and
    starts the job on first boot.

    :param mqBucket: S3 bucket holding the job folders
    :param mqparams: job parameter dict (instanceType, mzxmlFiles, ...)
    :param UserDataScript: format-string template for the EC2 user data
    :returns: tuple of (instanceID, password) where password is the local
        Administrator password generated for the server
    """
    region = 'us-west-2'
    securityGroups = ['sg-a2dd8dc6']
    instanceType = mqparams['instanceType']
    subnetId = 'subnet-a95a0ede'
    # The volume should be four times the size of the datafiles (room for results) and padded 150GB.
    volumeSize = (getDataSize(mqparams['mzxmlFiles']) * 4) + 150
    # BUG FIX: the placeholder "<PASSWORD>(15)" was not valid Python; this is
    # the call that generates the Administrator password for the job server.
    password = passwordGen(15)
    UserData = UserDataScript.format(bucket = mqBucket, jobFolder = "{0}-{1}".format(mqparams['department'], mqparams['jobName']), jobContact = mqparams['contactEmail'], password = password)
    image_id = find_image(region)
    instanceID = create_ec2worker(region, image_id, securityGroups, instanceType, subnetId, volumeSize, UserData, mqparams)
    return instanceID, password
def genTempUrl(mqBucket, jobFolder):
    """
    Build a pre-signed S3 URL (valid for ~34 days) pointing at the job's
    combined results bundle.
    """
    s3_client = boto3.client('s3')
    valid_seconds = 2937600  # 34 days
    bundle_key = "{0}/{1}".format(
        jobFolder, "maxquant-{0}-results-combined.zip".format(jobFolder))
    return s3_client.generate_presigned_url(
        'get_object',
        Params = {'Bucket': mqBucket, 'Key': bundle_key},
        ExpiresIn = valid_seconds)
def checkfiles(files):
    """
    Verify that every file in `files` exists in the job directory; print
    the missing ones and exit with status 1 if any are absent.
    """
    missing = [name for name in files if not os.path.isfile(name)]
    if not missing:
        return
    print("Error: the following files were not found in the job directory:")
    for name in missing:
        print("\t{0}".format(name))
    sys.exit(1)
def main(parms):
"""
Program execution starts and is driven from this function
"""
print("\nMaxQuant version: %s\n" % maxquant_ver)
mqparams = {}
# Store the job metadata provided via command-line parameters in the mqparams dict that will hold all info about the job
mqparams['jobName'] = parms.jobname.strip().replace(' ','')
mqparams['department'] = parms.department.strip().replace(' ','')
mqparams['contactEmail'] = parms.contact.strip().replace(' ','')
# If a custom 'databases.xml' file is found in the job submission directory, include it.
if os.path.isfile("databases.xml"):
print("Found custom 'databases.xml' file...")
mqparams['database'] = "databases.xml"
# If a custom 'modifications.xml' file is found in the job submission directory, include it.
if os.path.isfile("modifications.xml"):
print("Found custom 'modifications.xml' file...")
mqparams['modifications'] = "modifications.xml"
# This is the top-level S3 bucket that all job folders will live under
mqBucket = "fredhutch-maxquant-jobs"
# The job files will be uploaded and run in this directory on the job server
mqdir = "c:\\mq-job\\"
# The job folder in S3 that will hold the data/results (child of the maxquant jobs bucket)
jobFolder = "{0}-{1}".format(mqparams['department'], mqparams['jobName'])
sys.stdout.write("Adjusting MaxQuant configuration file: {0}...".format(parms.mqconfig))
# Adjust the config file (update paths, threads)
datafiles, fastas = adjustConfig(parms.mqconfig, mqdir, mqparams)
print(" Done!")
# Check to see that the data and fasta files listed in the maxquant configuration file (XML) are located in the job directory
checkfiles(datafiles)
checkfiles(fastas)
# Store the file inventory and calculated instance type in the mq job dictionary
mqparams['mzxmlFiles'] = [e.strip() for e in datafiles]
mqparams['fastaFiles'] = [e.strip() for e in fastas]
mqparams['instanceType'] = pickInstanceType(mqparams['mzxmlFiles'], mqparams)[0]
# Make sure that this is a uniqe job (department + jobname) so a previous jobs files in S3 don't get overwritten
if checkJobAlreadyExists(mqBucket, jobFolder):
print("\nThere is already an existing job named '{0}' for the '{1}' department/lab; choose a different job name and try again".format(mqparams['jobName'], mqparams['department']))
sys.exit(1)
# Upload all the jobs files to the | |
# TESTDPP.py
import argparse
import baselineUtils
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import os
import time
from transformer.batch import subsequent_mask
from torch.optim import Adam,SGD,RMSprop,Adagrad
from transformer.noam_opt import NoamOpt
import numpy as np
import scipy.io
import json
import pickle
from quantized_TFsamples import QuantizedTF
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
import quantized_TF
np.random.seed(seed=7)
torch.seed()
#we write a path creator
'''here the fun to run in every batch, it will return a dict with all paths for each element of the batch
'''
def convertfunc(list_of_tensor, clusters, batch_src, key):
    """
    Convert sampled cluster-index tensors into absolute 2D paths.

    Each tensor of cluster ids is mapped through the cluster centroids,
    cumulatively summed, and offset by the last observed position of batch
    element `key` (columns 0:2 of its final source step).
    """
    last_pos = batch_src[key, -1:, 0:2].cpu().numpy()
    return [
        clusters[indices.cpu().numpy()].cumsum(1) + last_pos
        for indices in list_of_tensor
    ]
def Path_creator(n_in_batch, inp, dec_inp, src_att, model, device, temperature = 0.1, dim_temp = 12):
    '''
    Sample a tree of future trajectories for every element in the batch.

    At prediction step i, the first `our_rule[i]` candidate columns returned
    by model.predict are each expanded (assumes the model orders candidates
    best-first -- TODO confirm in model.predict), so every batch element ends
    up with prod(our_rule) sampled paths of length dim_temp.

    temperature (0 < t <= 0.5) controls how many candidates are kept at the
    early steps; later steps decay quadratically down to a floor of 1.

    Returns a dict mapping str(batch index) -> list of sampled paths.
    '''
    assert temperature <= 0.5
    assert temperature > 0
    # Candidates to expand at each of the dim_temp future steps; decays
    # quadratically with the step index, floored at 1.
    num_samp_times = torch.zeros(dim_temp, dtype=torch.uint8)
    for i in range(dim_temp):
        num = temperature*(dim_temp*dim_temp)//((i+2)**2)
        if num == 0:
            num_samp_times[i] = 1.
        else:
            num_samp_times[i] = temperature*(dim_temp*dim_temp)//((i+2)**2)
    '''
    So our sampling rule is the tensor num_samp_times of dim dim_temp, call the singletons with .item()
    '''
    our_rule = num_samp_times #[7, 3, 1, 1, 1, 1, 1, 1, 1, 1]
    #our_rule = torch.tensor([1,1,1,1,1,1,1,1,1,1,1,1])
    print('How many samples for each input', torch.prod(our_rule, 0).item())
    '''
    create the storing place(global) and the final dict structure
    '''
    # Per-step accumulators of sampled columns, plus the per-element output dict.
    storing_place = [torch.empty((n_in_batch, 1)).to(device) for i in range(dim_temp)]
    final_dict = {}
    #here for each i there will be stores the paths as lists
    for i in range(n_in_batch):
        final_dict[str(i)]= []
    '''
    now we prepare everything, and save first step in the store_place
    '''
    '''call the recursive patheator'''
    # Seed the recursion with the step-0 candidates from the model.
    step = 0
    trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
    out, out_clusters = model.predict(inp, dec_inp, src_att, trg_att)
    input_columns = out_clusters[:, :our_rule[0]].to(device) #our_rule[0]
    storing_place[step] = torch.cat((storing_place[0],input_columns), dim = 1)
    recursive_patheator(dec_inp, step + 1, input_columns, n_in_batch, our_rule, inp, src_att, model, final_dict, storing_place, device)
    return final_dict
'''modify function that transformrs in paths'''
'''compute metrics'''
'''cry'''
'''
Here the recursive function that creates the paths
'''
def recursive_patheator(dec_inp, step, input_columns, num_in_batch, our_rule, inp, src_att, model, final_dict, storing_place, device):
    """
    Depth-first expansion of the sampling tree.

    Each candidate column is appended to the decoder input and the model is
    queried for the next step's candidates; when `step` reaches 12 (the
    prediction horizon) the finished path -- minus the start-of-sequence
    token -- is appended to `final_dict` for every batch element.
    Mutates `final_dict` and `storing_place` in place.
    """
    if step == 12:
        # Base case: the path is complete.
        dec_inp = torch.cat((dec_inp, input_columns), dim = 1)
        assert dec_inp.shape[1] == our_rule.shape[0] + 1
        for i in range(num_in_batch):
            # Drop the start-of-sequence token (column 0) before storing.
            final_dict[str(i)].append(dec_inp[i, 1:])
        return
    else:
        for i in range(input_columns.shape[1]):
            # Extend the decoder input with one candidate column.
            dec_inp_next = torch.cat((dec_inp, input_columns[:, i].reshape(-1, 1)), dim = 1).to(device)
            # Run the model with a causal mask matching the new length.
            trg_att = subsequent_mask(dec_inp_next.shape[1]).repeat(num_in_batch, 1, 1).to(device)
            out, out_clusters = model.predict(inp, dec_inp_next, src_att, trg_att )
            # Keep the first our_rule[step] candidate clusters (assumes the
            # model orders candidates best-first -- TODO confirm).
            new_columns = out_clusters[:, :our_rule[step]].to(device)
            storing_place[step] = torch.cat((storing_place[step], new_columns), dim = 1)
            recursive_patheator(dec_inp_next, step + 1, new_columns, num_in_batch, our_rule, inp, src_att, model,final_dict, storing_place, device)
'''
Here are functions for getting the mad and fad metrics from gt noisy and pr dictionaries
Based on the paper, for eah element in noist gt we calculate the distance
between that and all prs samples and get the distance that is min
For mad we have also arr.shape[0]=12 in denominator but for fad we don't have it
'''
'''
add comment
'''
def diversity_metric(samples):
    """
    Mean nearest-neighbour distance between sampled paths.

    For each sample, the Euclidean (Frobenius) distance to its closest
    *distinct* sample is found; the mean of those minima is returned.
    Larger values indicate more diverse samples.

    :param samples: list of equally-shaped numpy arrays
    :returns: float diversity score (0.0 when fewer than two samples)
    """
    if len(samples) < 2:
        # No pair to compare: define diversity as zero.
        return 0.0
    final_norm = 0.0
    for i in samples:
        temp_min = np.inf
        for j in samples:
            # BUG FIX: the old identity test `np.sum(i - j) == 0` also
            # skipped genuinely different samples whose coordinate
            # differences cancel out (e.g. [1,-1] vs [0,0]), leaving
            # temp_min at inf; compare element-wise instead.
            if i is j or np.array_equal(i, j):
                continue
            norm = np.linalg.norm(i - j)
            if norm < temp_min:
                temp_min = norm
        final_norm += temp_min
    return final_norm / len(samples)
def our_diversity_metric(preds_dict):
    """
    Average the per-element diversity metric over every key of
    `preds_dict`; returns (average, total).
    """
    total = sum(diversity_metric(samples) for samples in preds_dict.values())
    return total / len(preds_dict), total
'''
add comment
'''
def get_min_distance_ADE(list_arr1, list_arr2):
    """
    Minimum-over-samples Average Displacement Error.

    For every ground-truth array in `list_arr1`, take the smallest summed
    squared displacement against all predictions in `list_arr2`, then
    average over ground truths and time steps.

    :param list_arr1: list of (T, 2) ground-truth arrays (all same T)
    :param list_arr2: list of (T, 2) predicted arrays
    :returns: float ADE score
    """
    # BUG FIX: the divisor previously read the loop variable `arr` after the
    # loop finished (undefined for an empty list); take the sequence length
    # from the first ground truth instead.
    steps = list_arr1[0].shape[0]
    list_min = []
    for gt in list_arr1:
        min_dist = np.inf
        for pred in list_arr2:
            dist = np.sum((gt - pred) ** 2)
            if dist < min_dist:
                min_dist = dist
        list_min.append(min_dist)
    return np.sum(list_min) / (len(list_min) * steps)
'''
add comment
'''
def get_min_distance_FDE(list_arr1, list_arr2):
    """
    Minimum-over-samples Final Displacement Error: for each ground truth,
    the smallest squared distance between final positions over all
    predictions, averaged over ground truths.
    """
    minima = [
        min((np.sum((gt[-1, :] - pred[-1, :]) ** 2) for pred in list_arr2),
            default=np.inf)
        for gt in list_arr1
    ]
    return np.sum(minima) / len(minima)
'''
add comment
'''
def get_metrics_ADEandFDE(gts_dict, prs_dict):
    """
    Accumulate ADE and FDE over every batch element.

    NOTE (naming fix, behavior unchanged): the original accumulated the ADE
    distances into a variable named `FDE` (and vice versa) and then returned
    them in swapped order, so the values reaching the caller were correct
    but the internal names were inverted. The variables are renamed here;
    the returned tuple is identical: (mean ADE, mean FDE, total ADE,
    total FDE).
    """
    ade_total = 0
    fde_total = 0
    for key, gt_samples in gts_dict.items():
        ade_total += get_min_distance_ADE(gt_samples, prs_dict[key])
        fde_total += get_min_distance_FDE(gt_samples, prs_dict[key])
    # Average over the number of batch elements.
    scaling = len(gts_dict)
    return ade_total / scaling, fde_total / scaling, ade_total, fde_total
def main():
parser=argparse.ArgumentParser(description='Train the individual Transformer model')
parser.add_argument('--dataset_folder',type=str,default='datasets')
parser.add_argument('--dataset_name',type=str,default='zara1')
parser.add_argument('--obs',type=int,default=8)
parser.add_argument('--preds',type=int,default=12)
parser.add_argument('--emb_size',type=int,default=512)
parser.add_argument('--heads',type=int, default=8)
parser.add_argument('--layers',type=int,default=6)
parser.add_argument('--cpu',action='store_true')
parser.add_argument('--verbose',action='store_true')
parser.add_argument('--batch_size',type=int,default=256)
parser.add_argument('--delim',type=str,default='\t')
parser.add_argument('--name', type=str, default="zara1")
parser.add_argument('--epoch',type=str,default="00015")
parser.add_argument('--num_samples', type=int, default="20")
args=parser.parse_args()
model_name=args.name
'''
i don't think we need this
try:
os.mkdir('models')
except:
pass
try:
os.mkdir('output')
except:
pass
try:
os.mkdir('output/QuantizedTFsamples')
except:
pass
try:
os.mkdir(f'models/QuantizedTFsamples')
except:
pass
try:
os.mkdir(f'output/QuantizedTFsamples/{args.name}')
except:
pass
try:
os.mkdir(f'models/QuantizedTFsamples/{args.name}')
except:
pass
'''
#log=SummaryWriter('logs/%s'%model_name)
# log.add_scalar('eval/mad', 0, 0)
# log.add_scalar('eval/fad', 0, 0)
device=torch.device("cuda")
if args.cpu or not torch.cuda.is_available():
device=torch.device("cpu")
args.verbose=True
## creation of the dataloaders for train and validation
test_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)
mat = scipy.io.loadmat(os.path.join(args.dataset_folder, args.dataset_name, "clusters.mat"))
clusters=mat['centroids']
num_samples = 20 #for now
#print(args.layers)
model=QuantizedTF(clusters.shape[0], clusters.shape[0]+1, clusters, clusters.shape[0], N=args.layers,
d_model=args.emb_size, d_ff=1024, h=args.heads).to(device)
model.load_state_dict(torch.load(f'models/QuantizedTFsamples/{args.name}/{args.epoch}.pth'))
model.to(device)
'''
Now we have a tf that gives in output a batch of dim [1024, 10, 2]
We will create many paths on it, all paths influenced by the selection of the previous step
MASK will be a mess but we have hope
'''
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)
with torch.no_grad():
model.eval()
gt=[]
pr=[]
inp_=[]
peds=[]
frames=[]
dt=[]
ADE = 0
FDE = 0
final_diversity = 0
#I need for later the num of test elements
num_of_elements = 0
for id_b, batch in enumerate(test_dl):
print(f"batch {id_b:03d}/{len(test_dl)-1}")
peds.append(batch['peds'])
frames.append(batch['frames'])
dt.append(batch['dataset'])
scale = np.random.uniform(0.5, 2)
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
#print('THIS IS THE GROUND TRUTH')
#print('shape', gt_b.shape)
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
#print(start_of_seq.shape)
dec_inp = start_of_seq
dim_temp = 12
storing_place = [torch.empty((n_in_batch, 1)).to(device) for i in range(dim_temp)]
final_dict = {}
#here for each i there will be stores the paths as lists of tensors(later)
for i in range(n_in_batch):
final_dict[str(i)]= []
#here recursively we samples paths following a rule
dict_paths = Path_creator(n_in_batch, inp, dec_inp, src_att, model, device, temperature = 0.2, dim_temp = 12)
#print('path_rule', path_rule)
#the path rule is a dict with list of tensors at each element
#print('PATH RULE 33')
#print('##################################################################################')
#print(dict_paths['33'])
#print('its length', len(dict_paths['33']))
#print('##################################################################################')
#Here is for transforming the dictionary
my_dictionary = {k: convertfunc(v, clusters, batch['src'], int(k)) for k, v in dict_paths.items()}
num_of_elements += len(my_dictionary)
#print(num_of_elements)
del dict_paths
#print('###############################################################')
#print('The final dict for 1 batch')
#print(len(my_dictionary['33']))
#print('#############')
#print(my_dictionary['33'][0])
#print('#############')
#print(my_dictionary['33'])
#print('###############################################################')
'''
to implement the distances we have to augment the groun truth
for each element(e.g. '33') normally we would have 1 GT
but for the metrics we need #samples GT for every element
exemplum gratia GT_33 : [[0.2,0.1], [0.4 , 0.8]] (imagine only 2 steps)
===========> we want, if we sampled 3 paths for every sample
GT_33_a ~ GT_33
GT_33_b ~ noise + GT_33
GT_33_c ~ //
'''
dict_of_noisy_GT = {}
for i in range(n_in_batch):
dict_of_noisy_GT[str(i)]= [gt_b[i].numpy()]
How_many_to_noise = len(my_dictionary['0'])
#print('1 GT', dict_of_noisy_GT['33'])
for key, value in dict_of_noisy_GT.items():
for i in range(How_many_to_noise - 1):
noise = np.random.normal(0, 1, dict_of_noisy_GT[key][0].shape)
new_signal = dict_of_noisy_GT[key][0] + noise
dict_of_noisy_GT[key].append(new_signal)
#print('after we add noise')
#print('##########################################')
#print('1 GT', dict_of_noisy_GT['33'])
#print('##########################################')
#now we have noise and we have to compute the metrics for every batch
#add them and study them
#we have a dict with preds (every key has n samples)
#we have a dict with Gts (every k has 1GT and n-1 NoisyGT)
temp_ADE, temp_FDE, ADE_unsc, FDE_unsc = get_metrics_ADEandFDE(dict_of_noisy_GT, my_dictionary)
diversity_metric_, div_m= our_diversity_metric(my_dictionary)
print('#################################################################')
print('ADE for this batch{id_b:03d}/{len(test_dl)}', temp_ADE )
print('FAD for this batch{id_b:03d}/{len(test_dl)}', temp_FDE )
print('Diversity metric for batch{id_b:03d}/{len(test_dl)}', diversity_metric_)
print('#################################################################')
#del my_dictionary
#del dict_of_noisy_GT
#for the final average now we save the ade and fad and at end of test | |
# pypy/objspace/std/ropeunicodeobject.py
from pypy.objspace.std.model import registerimplementation, W_Object
from pypy.objspace.std.register_all import register_all
from pypy.objspace.std.multimethod import FailedToImplement
from pypy.interpreter.error import OperationError, operationerrfmt
from pypy.interpreter import gateway
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.unicodeobject import _normalize_index
from pypy.objspace.std.ropeobject import W_RopeObject
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.rlib import rope
from pypy.rlib.rstring import StringBuilder
from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice
from pypy.objspace.std import unicodeobject, slicetype, iterobject
from pypy.objspace.std.tupleobject import W_TupleObject
from pypy.rlib.rarithmetic import intmask, ovfcheck
from pypy.module.unicodedata import unicodedb
from pypy.tool.sourcetools import func_with_new_name
from pypy.objspace.std.formatting import mod_format
from pypy.objspace.std.unicodeobject import (
format__Unicode_ANY as format__RopeUnicode_ANY)
def wrapunicode(space, uni):
    # Wrap an interpreter-level unicode string as a rope-backed unicode object.
    return W_RopeUnicodeObject(rope.rope_from_unicode(uni))
def unicode_from_string(space, w_str):
    """Decode a rope byte string to a rope unicode object using the space's
    default encoding; raise TypeError if the decoder returns non-unicode."""
    from pypy.objspace.std.unicodetype import getdefaultencoding
    assert isinstance(w_str, W_RopeObject)
    encoding = getdefaultencoding(space)
    w_retval = decode_string(space, w_str, encoding, "strict")
    if not space.isinstance_w(w_retval, space.w_unicode):
        raise operationerrfmt(
            space.w_TypeError,
            "decoder did not return an unicode object (type '%s')",
            space.type(w_retval).getname(space))
    assert isinstance(w_retval, W_RopeUnicodeObject)
    return w_retval
def decode_string(space, w_str, encoding, errors):
    """Decode a rope byte string to a rope unicode object.

    With strict error handling, ascii/latin-1/utf-8 are decoded directly on
    the rope node; every other case falls through to the generic
    decode_object machinery."""
    from pypy.objspace.std.unicodetype import decode_object
    if errors is None or errors == "strict":
        node = w_str._node
        if encoding == 'ascii':
            result = rope.str_decode_ascii(node)
            if result is not None:
                return W_RopeUnicodeObject(result)
        elif encoding == 'latin-1':
            # latin-1 maps bytes 1:1 to code points, so the byte-string node
            # can be wrapped directly without copying.
            assert node.is_bytestring()
            return W_RopeUnicodeObject(node)
        elif encoding == "utf-8":
            result = rope.str_decode_utf8(node)
            if result is not None:
                return W_RopeUnicodeObject(result)
    # Fallback: generic decoding (other codecs or non-strict error modes).
    w_result = decode_object(space, w_str, encoding, errors)
    return w_result
def encode_unicode(space, w_unistr, encoding, errors):
    """Encode a rope unicode object to a rope byte string.

    With strict error handling, ascii/latin-1/utf-8 are encoded directly on
    the rope node; every other case falls through to the generic
    encode_object machinery."""
    from pypy.objspace.std.unicodetype import getdefaultencoding, \
        _get_encoding_and_errors, encode_object
    from pypy.objspace.std.ropeobject import W_RopeObject
    if errors is None or errors == "strict":
        node = w_unistr._node
        if encoding == 'ascii':
            result = rope.unicode_encode_ascii(node)
            if result is not None:
                return W_RopeObject(result)
        elif encoding == 'latin-1':
            result = rope.unicode_encode_latin1(node)
            if result is not None:
                return W_RopeObject(result)
        elif encoding == "utf-8":
            result = rope.unicode_encode_utf8(node)
            if result is not None:
                return W_RopeObject(result)
    # Fallback: generic encoding (other codecs or non-strict error modes).
    return encode_object(space, w_unistr, encoding, errors)
class W_RopeUnicodeObject(unicodeobject.W_AbstractUnicodeObject):
    """Unicode string implementation backed by a rope node (a tree of
    string segments) instead of a flat buffer."""
    from pypy.objspace.std.unicodetype import unicode_typedef as typedef
    _immutable_fields_ = ['_node']
    def __init__(w_self, node):
        w_self._node = node
    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%r)" % (w_self.__class__.__name__, w_self._node)
    def unwrap(w_self, space):
        # for testing: flatten the rope into a host-level unicode string
        return w_self._node.flatten_unicode()
    def str_w(w_self, space):
        # str() conversion goes through the space so that encoding errors
        # surface at application level.
        return space.str_w(space.str(w_self))
    def create_if_subclassed(w_self):
        # Return self unless this is a subclass instance; then make a plain
        # W_RopeUnicodeObject sharing the same node.
        if type(w_self) is W_RopeUnicodeObject:
            return w_self
        return W_RopeUnicodeObject(w_self._node)
    def unicode_w(self, space):
        return self._node.flatten_unicode()
# Shared singleton for the empty unicode rope.
W_RopeUnicodeObject.EMPTY = W_RopeUnicodeObject(rope.LiteralStringNode.EMPTY)
# Register this implementation with the std object space's dispatch tables.
registerimplementation(W_RopeUnicodeObject)
def _isspace(uchar_ord):
    # Whitespace predicate (by code point) passed to rope string operations.
    return unicodedb.isspace(uchar_ord)
def ropeunicode_w(space, w_str):
    """Return the rope node for a wrapped unicode (or decodable byte
    string) object; other wrapped strings are unwrapped and re-wrapped in a
    literal unicode node."""
    if isinstance(w_str, W_RopeUnicodeObject):
        return w_str._node
    if isinstance(w_str, W_RopeObject):
        return unicode_from_string(space, w_str)._node
    return rope.LiteralUnicodeNode(space.unicode_w(w_str))
class W_RopeUnicodeIterObject(iterobject.W_AbstractIterObject):
    """Iterator over the characters of a rope unicode string."""
    from pypy.objspace.std.itertype import iter_typedef as typedef
    def __init__(w_self, w_rope, index=0):
        w_self.node = node = w_rope._node
        # ItemIterator walks the rope tree one character at a time.
        w_self.item_iter = rope.ItemIterator(node)
        w_self.index = index
def iter__RopeUnicode(space, w_uni):
    # iter(u): wrap the rope in its character iterator object.
    return W_RopeUnicodeIterObject(w_uni)
# Helper for int()/long() conversion: map a unicode string to a plain byte
# string of ASCII digits and spaces that the integer parser understands.
def unicode_to_decimal_w(space, w_unistr):
    if not isinstance(w_unistr, W_RopeUnicodeObject):
        raise OperationError(space.w_TypeError,
                             space.wrap("expected unicode"))
    unistr = w_unistr._node
    length = unistr.length()
    result = ['\0'] * length
    digits = [ '0', '1', '2', '3', '4',
               '5', '6', '7', '8', '9']
    iter = rope.ItemIterator(unistr)
    for i in range(length):
        uchr = iter.nextint()
        if unicodedb.isspace(uchr):
            result[i] = ' '
            continue
        try:
            # Translate any Unicode decimal digit to its ASCII equivalent.
            result[i] = digits[unicodedb.decimal(uchr)]
        except KeyError:
            if 0 < uchr < 256:
                # Pass other Latin-1 characters through unchanged.
                result[i] = chr(uchr)
            else:
                # Non-Latin-1, non-digit: raise UnicodeEncodeError with the
                # offending range, mirroring CPython's behavior.
                w_encoding = space.wrap('decimal')
                w_start = space.wrap(i)
                w_end = space.wrap(i+1)
                w_reason = space.wrap('invalid decimal Unicode string')
                raise OperationError(space.w_UnicodeEncodeError, space.newtuple([w_encoding, w_unistr, w_start, w_end, w_reason]))
    return ''.join(result)
# string-to-unicode delegation (implicit coercion for multimethod dispatch)
def delegate_Rope2RopeUnicode(space, w_rope):
    w_uni = unicode_from_string(space, w_rope)
    assert isinstance(w_uni, W_RopeUnicodeObject) # help the annotator!
    return w_uni
def str__RopeUnicode(space, w_uni):
    # str(u) is implemented as u.encode() (default encoding).
    return space.call_method(w_uni, 'encode')
def lt__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
    # Lexicographic '<' via the rope comparison primitive.
    n1 = w_str1._node
    n2 = w_str2._node
    return space.newbool(rope.compare(n1, n2) < 0)
def le__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
    # Lexicographic '<='.
    n1 = w_str1._node
    n2 = w_str2._node
    return space.newbool(rope.compare(n1, n2) <= 0)
def _eq(w_str1, w_str2):
    # Structural equality of the two underlying rope nodes.
    result = rope.eq(w_str1._node, w_str2._node)
    return result
def eq__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
    return space.newbool(_eq(w_str1, w_str2))
def eq__RopeUnicode_Rope(space, w_runi, w_rope):
    # unicode == str: handled by the shared unicode/string comparison
    # helper, decoding the byte string via unicode_from_string.
    from pypy.objspace.std.unicodeobject import _unicode_string_comparison
    return _unicode_string_comparison(space, w_runi, w_rope,
                                      False, unicode_from_string)
def ne__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
    return space.newbool(not _eq(w_str1, w_str2))
def ne__RopeUnicode_Rope(space, w_runi, w_rope):
    # unicode != str: same helper, inverted result flag.
    from pypy.objspace.std.unicodeobject import _unicode_string_comparison
    return _unicode_string_comparison(space, w_runi, w_rope,
                                      True, unicode_from_string)
def gt__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
    # Lexicographic '>'.
    n1 = w_str1._node
    n2 = w_str2._node
    return space.newbool(rope.compare(n1, n2) > 0)
def ge__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
    # Lexicographic '>='.
    n1 = w_str1._node
    n2 = w_str2._node
    return space.newbool(rope.compare(n1, n2) >= 0)
def ord__RopeUnicode(space, w_uni):
    # ord() accepts only length-1 strings.
    if w_uni._node.length() != 1:
        raise OperationError(space.w_TypeError, space.wrap('ord() expected a character'))
    return space.wrap(w_uni._node.getint(0))
def getnewargs__RopeUnicode(space, w_uni):
    # __getnewargs__ for pickling: rebuild from the underlying node.
    return space.newtuple([W_RopeUnicodeObject(w_uni._node)])
def add__RopeUnicode_RopeUnicode(space, w_left, w_right):
    # unicode + unicode: rope concatenation of the two nodes.
    right = w_right._node
    left = w_left._node
    try:
        return W_RopeUnicodeObject(rope.concatenate(left, right))
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("string too long"))
def add__Rope_RopeUnicode(space, w_left, w_right):
    # str + unicode: decode the byte string first, then add as unicode.
    return space.add(unicode_from_string(space, w_left) , w_right)
def add__RopeUnicode_Rope(space, w_left, w_right):
    # unicode + str: decode the byte string first, then add as unicode.
    return space.add(w_left, unicode_from_string(space, w_right))
def contains__RopeUnicode_RopeUnicode(space, w_container, w_item):
    # `item in container` via rope substring search.
    item = w_item._node
    container = w_container._node
    return space.newbool(rope.find(container, item) != -1)
def contains__Rope_RopeUnicode(space, w_container, w_item):
    # unicode needle in byte-string haystack: decode the haystack first.
    return space.contains(unicode_from_string(space, w_container), w_item )
def unicode_join__RopeUnicode_ANY(space, w_self, w_list):
    """u.join(seq): concatenate the items of seq with u as separator.

    Byte-string items are decoded to unicode first; any other item type
    raises TypeError."""
    l_w = space.listview(w_list)
    delim = w_self._node
    totlen = 0
    if len(l_w) == 0:
        return W_RopeUnicodeObject.EMPTY
    if (len(l_w) == 1 and
        space.is_w(space.type(l_w[0]), space.w_unicode)):
        # Joining a single exact-unicode item needs no work at all.
        return l_w[0]
    values_list = []
    for i in range(len(l_w)):
        w_item = l_w[i]
        if isinstance(w_item, W_RopeUnicodeObject):
            # shortcut for performance
            item = w_item._node
        elif space.isinstance_w(w_item, space.w_str):
            item = unicode_from_string(space, w_item)._node
        else:
            msg = 'sequence item %d: expected string or Unicode'
            raise operationerrfmt(space.w_TypeError, msg, i)
        values_list.append(item)
    try:
        return W_RopeUnicodeObject(rope.join(w_self._node, values_list))
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("string too long"))
def hash__RopeUnicode(space, w_uni):
    # Hash is computed on the rope node itself.
    return space.wrap(rope.hash_rope(w_uni._node))
def len__RopeUnicode(space, w_uni):
    # len(u): character count stored on the rope node.
    return space.wrap(w_uni._node.length())
def getitem__RopeUnicode_ANY(space, w_uni, w_index):
    # u[i] with negative-index adjustment and a bounds check.
    ival = space.getindex_w(w_index, space.w_IndexError, "string index")
    uni = w_uni._node
    ulen = uni.length()
    if ival < 0:
        ival += ulen
    if ival < 0 or ival >= ulen:
        exc = space.call_function(space.w_IndexError,
                                  space.wrap("unicode index out of range"))
        raise OperationError(space.w_IndexError, exc)
    # getrope returns the single-character rope node at that position.
    return W_RopeUnicodeObject(uni.getrope(ival))
def getitem__RopeUnicode_Slice(space, w_uni, w_slice):
    # u[start:stop:step] for real slice objects.
    node = w_uni._node
    length = node.length()
    start, stop, step, sl = w_slice.indices4(space, length)
    if sl == 0:
        return W_RopeUnicodeObject.EMPTY
    return W_RopeUnicodeObject(rope.getslice(node, start, stop, step, sl))
def getslice__RopeUnicode_ANY_ANY(space, w_uni, w_start, w_stop):
    # Old-style __getslice__ protocol: step is always 1.
    node = w_uni._node
    length = node.length()
    start, stop = normalize_simple_slice(space, length, w_start, w_stop)
    sl = stop - start
    if sl == 0:
        return W_RopeUnicodeObject.EMPTY
    return W_RopeUnicodeObject(rope.getslice(node, start, stop, 1, sl))
def mul__RopeUnicode_ANY(space, w_uni, w_times):
    # u * n: repeat the rope n times.
    try:
        times = space.getindex_w(w_times, space.w_OverflowError)
    except OperationError, e:
        if e.match(space, space.w_TypeError):
            # Not an index at all: let multimethod dispatch try other
            # implementations for this operand pair.
            raise FailedToImplement
        raise
    node = w_uni._node
    try:
        return W_RopeUnicodeObject(rope.multiply(node, times))
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("string too long"))
def mul__ANY_RopeUnicode(space, w_times, w_uni):
    # n * u delegates to u * n.
    return mul__RopeUnicode_ANY(space, w_uni, w_times)
def make_generic(funcname):
    """Build a unicode_isXXX multimethod implementation returning True only
    when the string is non-empty and the `unicodedb` predicate named
    `funcname` holds for every character."""
    def func(space, w_self):
        node = w_self._node
        if node.length() == 0:
            # Empty strings are never "is..." true.
            return space.w_False
        iter = rope.ItemIterator(node)
        for idx in range(node.length()):
            if not getattr(unicodedb, funcname)(iter.nextint()):
                return space.w_False
        return space.w_True
    return func_with_new_name(func, "unicode_%s__RopeUnicode" % (funcname, ))
# Predicate multimethods generated from the unicodedb character database.
unicode_isspace__RopeUnicode = make_generic("isspace")
unicode_isalpha__RopeUnicode = make_generic("isalpha")
unicode_isalnum__RopeUnicode = make_generic("isalnum")
unicode_isdecimal__RopeUnicode = make_generic("isdecimal")
unicode_isdigit__RopeUnicode = make_generic("isdigit")
unicode_isnumeric__RopeUnicode = make_generic("isnumeric")
def unicode_islower__RopeUnicode(space, w_unicode):
    """islower(): True iff at least one cased character exists and no
    uppercase or titlecase character does."""
    cased = False
    iter = rope.ItemIterator(w_unicode._node)
    while 1:
        try:
            ch = iter.nextint()
        except StopIteration:
            # Exhausted without an upper/title char: result depends on
            # whether any cased character was seen at all.
            return space.newbool(cased)
        if (unicodedb.isupper(ch) or
            unicodedb.istitle(ch)):
            return space.w_False
        if not cased and unicodedb.islower(ch):
            cased = True
def unicode_isupper__RopeUnicode(space, w_unicode):
    """isupper(): True iff at least one cased character exists and no
    lowercase or titlecase character does."""
    cased = False
    iter = rope.ItemIterator(w_unicode._node)
    while 1:
        try:
            ch = iter.nextint()
        except StopIteration:
            return space.newbool(cased)
        if (unicodedb.islower(ch) or
            unicodedb.istitle(ch)):
            return space.w_False
        if not cased and unicodedb.isupper(ch):
            cased = True
def unicode_istitle__RopeUnicode(space, w_unicode):
    """istitle(): True iff upper/title characters only follow uncased
    characters and lowercase characters only follow cased ones, with at
    least one cased character present."""
    cased = False
    previous_is_cased = False
    iter = rope.ItemIterator(w_unicode._node)
    while 1:
        try:
            ch = iter.nextint()
        except StopIteration:
            return space.newbool(cased)
        if (unicodedb.isupper(ch) or
            unicodedb.istitle(ch)):
            if previous_is_cased:
                # Upper/title directly after a cased char breaks titlecase.
                return space.w_False
            previous_is_cased = cased = True
        elif unicodedb.islower(ch):
            if not previous_is_cased:
                return space.w_False
            previous_is_cased = cased = True
        else:
            previous_is_cased = False
def _contains(i, uni):
return unichr(i) in uni
def unicode_strip__RopeUnicode_None(space, w_self, w_chars):
return W_RopeUnicodeObject(rope.strip(w_self._node, True, True, _isspace))
def unicode_strip__RopeUnicode_RopeUnicode(space, w_self, w_chars):
return W_RopeUnicodeObject(rope.strip(w_self._node, True, True, _contains,
w_chars._node.flatten_unicode()))
def unicode_strip__RopeUnicode_Rope(space, w_self, w_chars):
return space.call_method(w_self, 'strip',
unicode_from_string(space, w_chars))
def unicode_lstrip__RopeUnicode_None(space, w_self, w_chars):
    # chars is None: strip unicode whitespace from the left end only.
    return W_RopeUnicodeObject(rope.strip(w_self._node, True, False, _isspace))
def unicode_lstrip__RopeUnicode_RopeUnicode(space, w_self, w_chars):
    # Strip any of the given unicode characters from the left end.
    return W_RopeUnicodeObject(rope.strip(w_self._node, True, False, _contains,
                                          w_chars._node.flatten_unicode()))
def unicode_lstrip__RopeUnicode_Rope(space, w_self, w_chars):
    # Byte-string chars argument: decode it, then re-dispatch to lstrip().
    return space.call_method(w_self, 'lstrip',
                             unicode_from_string(space, w_chars))
def unicode_rstrip__RopeUnicode_None(space, w_self, w_chars):
    # chars is None: strip unicode whitespace from the right end only.
    return W_RopeUnicodeObject(rope.strip(w_self._node, False, True, _isspace))
def unicode_rstrip__RopeUnicode_RopeUnicode(space, w_self, w_chars):
    # Strip any of the given unicode characters from the right end.
    return W_RopeUnicodeObject(rope.strip(w_self._node, False, True, _contains,
                                          w_chars._node.flatten_unicode()))
def unicode_rstrip__RopeUnicode_Rope(space, w_self, w_chars):
    # Byte-string chars argument: decode it, then re-dispatch to rstrip().
    return space.call_method(w_self, 'rstrip',
                             unicode_from_string(space, w_chars))
def unicode_capitalize__RopeUnicode(space, w_self):
    """Uppercase the first character and lowercase all the rest."""
    node = w_self._node
    n = node.length()
    if n == 0:
        # Empty string: capitalizing is a no-op, reuse the object.
        return w_self
    it = rope.ItemIterator(node)
    chars = [unichr(unicodedb.toupper(it.nextint()))]
    for _ in range(1, n):
        chars.append(unichr(unicodedb.tolower(it.nextint())))
    return W_RopeUnicodeObject(rope.rope_from_unicharlist(chars))
def unicode_title__RopeUnicode(space, w_self):
    """Titlecase the string: titlecase each character that starts a cased
    run, lowercase the characters inside a run."""
    input = w_self._node
    length = input.length()
    if length == 0:
        return w_self
    result = [u'\0'] * length
    iter = rope.ItemIterator(input)
    previous_is_cased = False
    for i in range(input.length()):
        unichar = iter.nextint()
        if previous_is_cased:
            # Inside a word: force lowercase.
            result[i] = unichr(unicodedb.tolower(unichar))
        else:
            # Word start: use the titlecase mapping.
            result[i] = unichr(unicodedb.totitle(unichar))
        previous_is_cased = unicodedb.iscased(unichar)
    return W_RopeUnicodeObject(rope.rope_from_unicharlist(result))
def _local_transform(node, transform):
    """Apply *transform* (code point -> unicode char) to every character of
    the rope *node* and wrap the result in a new W_RopeUnicodeObject."""
    l = node.length()
    res = [u' '] * l
    iter = rope.ItemIterator(node)
    for i in range(l):
        ch = iter.nextint()
        res[i] = transform(ch)
    return W_RopeUnicodeObject(rope.rope_from_unicharlist(res))
# RPython hint: specialize this function per transform callable.
_local_transform._annspecialcase_ = "specialize:arg(1)"
def _tolower(ordch):
    # Map a code point to its lowercase char.
    return unichr(unicodedb.tolower(ordch))
def unicode_lower__RopeUnicode(space, w_self):
    # unicode.lower(): lowercase every character.
    return _local_transform(w_self._node, _tolower)
def _toupper(ordch):
    # Map a code point to its uppercase char.
    return unichr(unicodedb.toupper(ordch))
def unicode_upper__RopeUnicode(space, w_self):
    # unicode.upper(): uppercase every character.
    return _local_transform(w_self._node, _toupper)
def _swapcase(ordch):
    """Invert the case of one code point; uncased chars pass through."""
    if unicodedb.isupper(ordch):
        swapped = unicodedb.tolower(ordch)
    elif unicodedb.islower(ordch):
        swapped = unicodedb.toupper(ordch)
    else:
        swapped = ordch
    return unichr(swapped)
def unicode_swapcase__RopeUnicode(space, w_self):
    # unicode.swapcase(): flip the case of every character.
    return _local_transform(w_self._node, _swapcase)
def _convert_idx_params(space, w_self, w_start, w_end):
self = w_self._node
length = w_self._node.length()
if space.is_w(w_start, space.w_None):
w_start = space.wrap(0)
if space.is_w(w_end, space.w_None):
w_end = | |
# -*- coding: utf-8 -*-
"""
Utility functions for welly.
:copyright: 2016 Agile Geoscience
:license: Apache 2.0
"""
from __future__ import division

import functools
import glob
import inspect
import re
import warnings

import numpy as np
import matplotlib.pyplot as plt
def deprecated(instructions):
    """
    Flags a method as deprecated. This decorator can be used to mark functions
    as deprecated. It will result in a warning being emitted when the function
    is used.

    NOTE(review): relies on ``functools``, ``inspect`` and ``warnings`` being
    imported at module level — confirm they are present in the import block.

    Args:
        instructions (str): A human-friendly string of instructions, such
            as: 'Please migrate to add_proxy() ASAP.'
    Returns:
        The decorated function.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            message = 'Call to deprecated function {}. {}'.format(
                func.__name__,
                instructions)
            # Attribute the warning to the caller's file/line, not ours.
            frame = inspect.currentframe().f_back
            warnings.warn_explicit(message,
                                   category=DeprecationWarning,
                                   filename=inspect.getfile(frame.f_code),
                                   lineno=frame.f_lineno)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def round_to_n(x, n):
    """
    Round *x* to *n* significant figures.

    Args:
        x (float): the number to round; may be negative. 0 is returned as-is.
        n (int): the number of significant figures to keep.

    Returns:
        float (or int). x rounded to n significant figures.
    """
    if x == 0:
        return 0  # log10 is undefined at 0; nothing to round anyway.
    # abs() keeps log10 happy for negative inputs; the sign is preserved
    # by round() itself.
    return round(x, -int(np.floor(np.log10(abs(x)))) + (n - 1))
def null(x):
    """Identity: return *x* unchanged. Serves as the default 'transform'
    where callers may supply a function applied to data before returning."""
    return x
def null_default(x):
    """Return a callable that ignores its argument and always yields *x*.
    Serves as a default factory where callers may supply their own."""
    return lambda _ignored: x
def skip(x):
    """Always return None, whatever *x* is."""
    return None
def are_close(x, y):
    """True when x and y differ by less than 1e-5 (absolute tolerance)."""
    tolerance = 0.00001
    return abs(x - y) < tolerance
def sharey(axes):
    """
    Shared axes limits without shared locators, ticks, etc.
    By <NAME>

    Attaches one Linker to every axes; the Linker keeps their y-limits
    in sync via ylim_changed callbacks.
    """
    linker = Linker(axes)
    for ax in axes:
        # Stash the linker on each axes so unsharey() can find it later.
        ax._linker = linker
def unsharey(ax):
    """
    Remove sharing from an axes.
    By <NAME>

    Disconnects the ylim_changed callback registered by sharey() and
    clears the back-reference.
    """
    ax._linker.unlink(ax)
    ax._linker = None
class Linker(object):
    """
    Keeps y-limits of a sequence of axes in sync when panning/zooming.
    By <NAME>
    """
    def __init__(self, axes):
        self.axes = axes
        self._cids = {}  # axes -> callback connection id, used by unlink()
        for ax in self.axes:
            self.link(ax)
    def unlink(self, ax):
        # Disconnect and forget the callback registered for this axes.
        ax.callbacks.disconnect(self._cids.pop(ax))
    def link(self, ax):
        # Re-sync all linked axes whenever this one's y-limits change.
        self._cids[ax] = ax.callbacks.connect('ylim_changed', self.rescale)
    def rescale(self, axes):
        # Map the changed axes' limits through its scale transform, then
        # apply the equivalent data limits to every linked axes.
        # (Uses the private Axis._scale attribute — matplotlib internals.)
        limits = axes.yaxis._scale.get_transform().transform(axes.get_ylim())
        for ax in self.axes:
            lim = ax.yaxis._scale.get_transform().inverted().transform(limits)
            # emit=False prevents recursive ylim_changed callbacks.
            ax.set_ylim(lim, emit=False, auto=None)
            # Note - This is specifically for this application!
            fix_ticks(ax)
def fix_ticks(ax):
    """
    Center ticklabels and hide any outside axes limits.
    By <NAME>
    """
    # Center the labels horizontally on the axes (x=0.5 in axis coords).
    plt.setp(ax.get_yticklabels(), ha='center', x=0.5,
             transform=ax._yaxis_transform)
    # We'll still wind up with some tick labels beyond axes limits for reasons
    # I don't fully understand...
    limits = ax.get_ylim()
    for label, loc in zip(ax.yaxis.get_ticklabels(), ax.yaxis.get_ticklocs()):
        # min/max because limits may be inverted (depth axes often are).
        if loc < min(limits) or loc > max(limits):
            label.set(visible=False)
        else:
            label.set(visible=True)
def flatten_list(l):
    """
    Unpacks lists in a list:
        [1, 2, [3, 4], [5, [6, 7]]]
    becomes
        [1, 2, 3, 4, 5, 6, 7]
    http://stackoverflow.com/a/12472564/3381305

    Iterative depth-first walk; returns None / [] inputs unchanged.
    """
    if (l is None) or (l == []):
        return l
    flat = []
    pending = list(l)
    while pending:
        item = pending.pop(0)
        if isinstance(item, list):
            # Expand nested lists in place, preserving order.
            pending = item + pending
        else:
            flat.append(item)
    return flat
def list_and_add(a, b):
    """
    Concatenate anything into a list.

    Args:
        a: the first thing
        b: the second thing

    Returns:
        list. All the things in a list.
    """
    a_list = a if isinstance(a, list) else [a]
    b_list = b if isinstance(b, list) else [b]
    return a_list + b_list
def lasio_get(l,
              section,
              item,
              attrib='value',
              default=None,
              remap=None,
              funcs=None):
    """
    Grabs, renames and transforms stuff from a lasio object.

    Args:
        l (lasio): a lasio instance.
        section (str): The LAS section to grab from, eg ``well``
        item (str): The item in the LAS section to grab from, eg ``name``
        attrib (str): The attribute of the item to grab, eg ``value``
        default (str): What to return instead.
        remap (dict): Optional. A dict of 'old': 'new' LAS field names.
        funcs (dict): Optional. A dict of 'las field': function() for
            implementing a transform before loading. Can be a lambda.

    Returns:
        The transformed item, or ``default`` when the lookup fails.
    """
    remap = remap or {}
    item_to_fetch = remap.get(item, item)
    if item_to_fetch is None:
        # Explicitly remapped to None: the caller wants this field skipped.
        return None
    try:
        obj = getattr(l, section)
        result = getattr(obj, item_to_fetch)[attrib]
    except (AttributeError, KeyError, IndexError, TypeError):
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only missing/mis-shaped data should fall back.
        return default
    if funcs is not None:
        f = funcs.get(item, null)
        result = f(result)
    return result
def parabolic(f, x):
    """
    Interpolation. From ageobot, from somewhere else.

    Fits a parabola through f[x-1], f[x], f[x+1] and returns the (x, y)
    coordinates of its vertex.
    """
    left, mid, right = f[x - 1], f[x], f[x + 1]
    xv = x + 0.5 * (left - right) / (left - 2 * mid + right)
    yv = mid - 0.25 * (left - right) * (xv - x)
    return (xv, yv)
def linear(u, v, d):
    """
    Linear interpolation.

    Args:
        u (float)
        v (float)
        d (float): the relative distance between the two to return.

    Returns:
        float. The interpolated value.
    """
    span = v - u
    return u + d * span
def find_nearest(a, value, index=False):
    """
    Find the array value, or index of the array value, closest to some given
    value.

    Args:
        a (ndarray)
        value (float)
        index (bool): whether to return the index instead of the array value.

    Returns:
        float. The array value (or index, as int) nearest the specified value.
    """
    nearest = np.argmin(np.abs(a - value))
    return nearest if index else a[nearest]
def find_previous(a, value, index=False, return_distance=False):
    """
    Find the nearest array value, or index of the array value, before some
    given value. Optionally also return the fractional distance of the given
    value from that previous value.

    Args:
        a (ndarray)
        value (float)
        index (bool): whether to return the index instead of the array value.
            Default: False.
        return_distance(bool): whether to return the fractional distance from
            the nearest value to the specified value. Default: False.

    Returns:
        float. The array value (or index, as int) before the specified value.
        If ``return_distance==True`` then a tuple is returned, where the
        second value is the distance.
    """
    # Index of the first element strictly greater than value; raises
    # IndexError when no element is greater, same as the original.
    above = np.where(a - value > 0)[0][0]
    frac = (value - a[above - 1]) / (a[above] - a[above - 1])
    result = above - 1 if index else a[above - 1]
    if return_distance:
        return result, frac
    return result
def find_edges(a):
    """
    Return two arrays: one of the changes, and one of the values.

    Returns:
        tuple: Two ndarrays, tops and values.
    """
    # Positions where a value differs from its predecessor, plus index 0.
    change_points = np.where(a[1:] != a[:-1])[0] + 1
    tops = np.append(0, change_points)
    return tops, a[tops]
def rms(a):
    """
    From ``bruges``

    Calculates the RMS of an array.

    :param a: An array.
    :returns: The RMS of the array.
    """
    squared = a ** 2.0
    return np.sqrt(squared.sum() / a.size)
def normalize(a, new_min=0.0, new_max=1.0):
    """
    From ``bruges``

    Normalize an array to [0,1] or to arbitrary new min and max.

    Args:
        a (ndarray)
        new_min (float): the new min, default 0.
        new_max (float): the new max, default 1.

    Returns:
        ndarray. The normalized array.
    """
    shifted = a - np.amin(a)
    unit = shifted / np.amax(shifted)
    return unit * (new_max - new_min) + new_min
def moving_average(a, length, mode='valid'):
    """
    From ``bruges``

    Computes the mean in a moving window. Naive implementation.

    Example:
        >>> test = np.array([1,9,9,9,9,9,9,2,3,9,2,2,3,1,1,1,1,3,4,9,9,9,8,3])
        >>> moving_average(test, 7, mode='same')
        [ 4.42857143, 5.57142857, 6.71428571, 7.85714286, 8. ,
        7.14285714, 7.14285714, 6.14285714, 5.14285714, 4.28571429,
        3.14285714, 3. , 2.71428571, 1.57142857, 1.71428571,
        2. , 2.85714286, 4. , 5.14285714, 6.14285714,
        6.42857143, 6.42857143, 6.28571429, 5.42857143]

    TODO:
        Other types of average.
    """
    # NOTE(review): assumes length >= 2 — with length == 1, pad becomes 0 and
    # the r[pad:-pad] slice below is empty. Confirm callers never pass 1.
    pad = np.floor(length/2)
    if mode == 'full':
        pad *= 2
    pad = int(pad)
    # Make a padded version, paddding with first and last values
    r = np.zeros(a.shape[0] + 2*pad)
    r[:pad] = a[0]
    r[pad:-pad] = a
    r[-pad:] = a[-1]
    # Cumsum with shifting trick
    s = np.cumsum(r, dtype=float)
    s[length:] = s[length:] - s[:-length]
    out = s[length-1:]/length
    # Decide what to return
    if mode == 'same':
        if out.shape[0] != a.shape[0]:
            # If size doesn't match, then interpolate.
            out = (out[:-1, ...] + out[1:, ...]) / 2
        return out
    elif mode == 'valid':
        return out[pad:-pad]
    else:  # mode=='full' and we used a double pad
        return out
def moving_avg_conv(a, length):
    """
    From ``bruges``

    Moving average via convolution. Seems slower than naive.
    """
    kernel = np.ones(length) / length
    return np.convolve(a, kernel, mode="same")
def nan_idx(y):
    """Helper to handle indices and logical indices of NaNs.

    From https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array

    Args:
        y (ndarray): 1D array with possible NaNs

    Returns:
        nans, logical indices of NaNs
        index, a function, with signature indices= index(logical_indices),
        to convert logical indices of NaNs to 'equivalent' indices

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nans = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return nans, to_indices
def extrapolate(a):
    """
    From ``bruges``

    Extrapolate up and down an array from the first and last non-NaN samples.

    E.g. Continue the first and last non-NaN values of a log up and down.

    Note: modifies *a* in place and returns it.
    """
    valid = np.flatnonzero(~np.isnan(a))
    first = valid[0]
    last = valid[-1]
    a[:first] = a[first]
    a[last + 1:] = a[last]
    return a
def top_and_tail(a):
"""
Remove the NaNs from the top | |
= True
elif info[1] == 0:
if tpflag and info[0]<sc:
tpflag = False
fp += 1
if tpflag and (not(info[0]<sc)):
pr[ix-1,1] = fp
indexlist = []
if self.eval_method == apMethod.interp11:
N = 11
P = 10.0
indexlist = [0]
elif self.eval_method == apMethod.interp40:
N = 40
P = N
elif self.eval_method == apMethod.interpAll:
N = gt_num
P = N
elif self.eval_method == apMethod.interpHyb11:
N = 11
P = 10.0
indexlist = [0]
elif self.eval_method == apMethod.interpHyb40:
N = 40
P = N
elif self.eval_method == apMethod.interpHyb41:
N = 41
P = 40.0
indexlist = [0]
interval = gt_num / P
t = int(tp_num / interval)
# BASE
for i in range(1,t+1):
indexlist.append(int(i * interval+0.4999)-1)
# print(len(indexlist))
precision = np.zeros(N)
for ix, ixpr in enumerate(indexlist):
precision[ix] = pr[ixpr,0] / (pr[ixpr,0] + pr[ixpr,1])
ap = 0
if ((self.eval_method == apMethod.interp11)
or (self.eval_method == apMethod.interp40)):
if indexlist==[] or indexlist[-1]+1 < tp_num:
precision[len(indexlist)] = pr[-1][0] / (pr[-1][0] + pr[-1][1])
for ix,_ in enumerate(precision):
# for ix,_ in enumerate(precision[:-1]):
precision[ix] = np.max(precision[ix:])
ap += precision[ix]
# print(precision)
ap /= N
# print(ap)
elif ((self.eval_method == apMethod.interpHyb11)
or (self.eval_method == apMethod.interpHyb40)
or (self.eval_method == apMethod.interpHyb41)):
for ix in range(len(indexlist)):
precision[ix] = np.max(precision[ix:])
ap += precision[ix]
ap = ap / N
ap_tail = 0
prec_tail = []
for ix in range(indexlist[-1],tp_num):
prec_tail.append(pr[ix][0] / (pr[ix][0] + pr[ix][1]))
prec_tail = np.array(prec_tail)
for ix in range(prec_tail.shape[0]):
prec_tail[ix] = np.max(prec_tail[ix:])
ap_tail += prec_tail[ix]
ap_tail /= gt_num
ap = ap + ap_tail
elif (self.eval_method == apMethod.interpAll):
interval = gt_num / 10
t = int(tp_num / interval)
stop_11point = int(t * interval+0.4999)-1
interval = gt_num / 40
t = int(tp_num / interval)
stop_40point = int(t * interval+0.4999)-1
# ap = 0
# for ix in range(stop_11point):
# precision[ix] = np.max(precision[ix:])
# ap += precision[ix]
# ap /= gt_num
# print(f'11points:{ap}')
# ap = 0
# for ix in range(stop_40point):
# precision[ix] = np.max(precision[ix:])
# ap += precision[ix]
# ap /= gt_num
# print(f'40points:{ap}')
ap = 0
for ix in range(tp_num):
precision[ix] = np.max(precision[ix:])
ap += precision[ix]
ap /= gt_num
# # print(f'Allpoints:{ap}')
# print(ap)
return ap
def _get_AP(self, infosmat, misc):
tp_num, fn_num, gt_num = misc
pr = self._get_pr(infosmat, tp_num)
ap = 0
if self.eval_method == apMethod.interp11:
ap = self._get_11P(pr, (tp_num,gt_num))
elif self.eval_method == apMethod.interp40:
ap = self._get_40P(pr, (tp_num,gt_num))
elif self.eval_method == apMethod.interp41:
ap = self._get_41P(pr, (tp_num,gt_num))
elif self.eval_method == apMethod.interpAll:
ap = self._get_allP(pr, (tp_num,gt_num))
elif self.eval_method == apMethod.interpHyb11:
ap = self._get_h11P(pr, (tp_num,gt_num))
elif self.eval_method == apMethod.interpHyb40:
ap = self._get_h40P(pr, (tp_num,gt_num))
elif self.eval_method == apMethod.interpHyb41:
ap = self._get_h41P(pr, (tp_num,gt_num))
return ap
    @staticmethod
    def _get_pr(infosmat, tp_num):
        """Build the cumulative precision table.

        infosmat rows are (score, flag) in descending-score order, where
        flag==1 marks a true positive and flag==0 a false positive.  Returns
        a (tp_num, 3) float array of [cum TP, cum FP, precision], one row
        per true positive.
        """
        pr = np.zeros((tp_num,3),dtype=np.float64)
        tp, fp, sc, ix = 0, 0, 0, 0
        tpflag = False  # True while FPs may still tie the last TP's score
        for info in infosmat:
            if ((info[1] == 1)):
                tp += 1
                sc = info[0]  # score of the most recent TP
                pr[ix,0] = tp
                pr[ix,1] = fp
                ix += 1
                tpflag = True
            elif info[1] == 0:
                if tpflag and info[0]<sc:
                    tpflag = False
                fp += 1
                # An FP whose score ties the last TP is charged to that row.
                if tpflag and (not(info[0]<sc)):
                    pr[ix-1,1] = fp
        pr[:,2] = pr[:,0] / (pr[:,0] + pr[:,1])
        return pr
    @staticmethod
    def _get_11P(pr, misc):
        """11-point interpolated AP over recall levels 0.0, 0.1, ..., 1.0."""
        tp_num, gt_num = misc
        num_interval = 11.0
        interP = np.linspace(0,1,num=11,endpoint=True)
        tp_per_ip = gt_num / 10  # TPs needed per 0.1 recall step
        num_ip = int(tp_num / tp_per_ip)  # recall levels actually reached
        indexlist = [0]  # recall level 0 samples the first TP row
        for i in range(1,num_ip+1):
            # Round the recall level to the nearest pr row index.
            indexlist.append(int(interP[i]*gt_num + 0.4999)-1)
        # Also sample the final TP when it falls between recall levels.
        if indexlist[-1] != tp_num-1: indexlist.append(tp_num-1)
        prmax = []
        for index in indexlist:
            prmax.append(pr[index][2])
        prmax = np.array(prmax)
        ap = 0
        # Interpolated precision: max precision at this or any later sample.
        for index,_ in enumerate(prmax):
            ap += np.max(prmax[index:])
        ap /= num_interval
        return ap
    @staticmethod
    def _get_40P(pr, misc):
        """40-point interpolated AP over recall levels 1/40, 2/40, ..., 1."""
        tp_num, gt_num = misc
        num_interval = 40.0
        interP = np.linspace(1/40,1,num=40,endpoint=True)
        tp_per_ip = gt_num / 40  # TPs needed per 1/40 recall step
        num_ip = int(tp_num / tp_per_ip)  # recall levels actually reached
        indexlist = []
        for i in range(num_ip):
            # Round the recall level to the nearest pr row index.
            indexlist.append(int(interP[i]*gt_num + 0.4999)-1)
        # Also sample the final TP when it falls between recall levels.
        if indexlist==[] or indexlist[-1] != tp_num-1: indexlist.append(tp_num-1)
        prmax = []
        for index in indexlist:
            prmax.append(pr[index][2])
        prmax = np.array(prmax)
        ap = 0
        # Interpolated precision: max precision at this or any later sample.
        for index,_ in enumerate(prmax):
            ap += np.max(prmax[index:])
        ap /= num_interval
        return ap
    @staticmethod
    def _get_41P(pr, misc):
        """41-point interpolated AP over recall levels 0, 1/40, ..., 1."""
        tp_num, gt_num = misc
        num_interval = 41.0
        interP = np.linspace(0,1,num=41,endpoint=True)
        # NOTE(review): interP spacing is 1/40, but tp_per_ip divides by 41;
        # the sibling interpHyb41 setup uses P = 40.0 — confirm this divisor.
        tp_per_ip = gt_num / 41
        num_ip = int(tp_num / tp_per_ip)  # recall levels actually reached
        indexlist = []
        for i in range(num_ip):
            # Round the recall level to the nearest pr row index.
            indexlist.append(int(interP[i]*gt_num + 0.4999)-1)
        # Also sample the final TP when it falls between recall levels.
        if indexlist==[] or indexlist[-1] != tp_num-1: indexlist.append(tp_num-1)
        prmax = []
        for index in indexlist:
            prmax.append(pr[index][2])
        prmax = np.array(prmax)
        ap = 0
        # Interpolated precision: max precision at this or any later sample.
        for index,_ in enumerate(prmax):
            ap += np.max(prmax[index:])
        ap /= num_interval
        return ap
@staticmethod
def _get_allP(pr, misc):
tp_num, gt_num = misc
ap = 0
for index,_ in enumerate(pr):
ap += np.max(pr[index:,2])
ap /= gt_num
return ap
    @staticmethod
    def _get_h11P(pr, misc):
        """Hybrid 11-point AP: interpolated AP over the reachable recall
        levels plus an all-point tail term past the last level."""
        tp_num, gt_num = misc
        num_interval = 11.0
        interP = np.linspace(0,1,num=11,endpoint=True)
        # NOTE(review): _get_11P divides by 10 here (interP spacing is 1/10);
        # dividing by 11 looks inconsistent — confirm intended.
        tp_per_ip = gt_num / 11
        num_ip = int(tp_num / tp_per_ip)
        indexlist = []
        for i in range(num_ip):
            # Round the recall level to the nearest pr row index.
            indexlist.append(int(interP[i]*gt_num + 0.4999)-1)
        prmax = []
        for index in indexlist:
            prmax.append(pr[index][2])
        prmax = np.array(prmax)
        ap = 0
        # Interpolated precision over the sampled recall levels.
        for index,_ in enumerate(prmax):
            ap += np.max(prmax[index:])
        ap /= num_interval
        ap2 = 0
        lp = -1 + int(num_ip * tp_per_ip + 0.4999)  # last sampled TP row
        if lp != tp_num:
            # All-point contribution from TPs beyond the last recall level.
            for index in range(lp+1,tp_num):
                ap2 += np.max(pr[index:,2])
            ap2 /= gt_num
        ap += ap2
        return ap
    @staticmethod
    def _get_h40P(pr, misc):
        """Hybrid 40-point AP: interpolated AP over the reachable recall
        levels plus an all-point tail term past the last level."""
        tp_num, gt_num = misc
        num_interval = 40.0
        interP = np.linspace(1/40,1,num=40,endpoint=True)
        tp_per_ip = gt_num / 40  # TPs needed per 1/40 recall step
        num_ip = int(tp_num / tp_per_ip)
        indexlist = []
        for i in range(num_ip):
            # Round the recall level to the nearest pr row index.
            indexlist.append(int(interP[i]*gt_num + 0.4999)-1)
        prmax = []
        for index in indexlist:
            prmax.append(pr[index][2])
        prmax = np.array(prmax)
        ap = 0
        # Interpolated precision over the sampled recall levels.
        for index,_ in enumerate(prmax):
            ap += np.max(prmax[index:])
        ap /= num_interval
        ap2 = 0
        lp = -1 + int(num_ip * tp_per_ip + 0.4999)  # last sampled TP row
        if lp != tp_num:
            # All-point contribution from TPs beyond the last recall level.
            for index in range(lp+1,tp_num):
                ap2 += np.max(pr[index:,2])
            ap2 /= gt_num
        ap += ap2
        return ap
    @staticmethod
    def _get_h41P(pr, misc):
        """Hybrid 41-point AP: interpolated AP over the reachable recall
        levels plus an all-point tail term past the last level."""
        tp_num, gt_num = misc
        num_interval = 41.0
        interP = np.linspace(0,1,num=41,endpoint=True)
        # NOTE(review): interP spacing is 1/40, but tp_per_ip divides by 41;
        # the interpHyb41 setup elsewhere uses P = 40.0 — confirm divisor.
        tp_per_ip = gt_num / 41
        num_ip = int(tp_num / tp_per_ip)
        indexlist = []
        for i in range(num_ip):
            # Round the recall level to the nearest pr row index.
            indexlist.append(int(interP[i]*gt_num + 0.4999)-1)
        prmax = []
        for index in indexlist:
            prmax.append(pr[index][2])
        prmax = np.array(prmax)
        ap = 0
        # Interpolated precision over the sampled recall levels.
        for index,_ in enumerate(prmax):
            ap += np.max(prmax[index:])
        ap /= num_interval
        ap2 = 0
        lp = -1 + int(num_ip * tp_per_ip + 0.4999)  # last sampled TP row
        if lp != tp_num:
            # All-point contribution from TPs beyond the last recall level.
            for index in range(lp+1,tp_num):
                ap2 += np.max(pr[index:,2])
            ap2 /= gt_num
        ap += ap2
        return ap
    def _prepare_data(self, current_class, difficulty):
        """Collect per-frame GT/detection matrices and ignore masks for one
        class and difficulty level.

        Returns a tuple of (gt_datas_list, dt_datas_list, ignored_gts,
        ignored_dets, dontcares, total_dc_num, total_num_valid_gt), where
        the data arrays stack [bbox, alpha] (and score for detections).
        """
        gt_datas_list = []
        dt_datas_list = []
        total_dc_num = []
        ignored_gts, ignored_dets, dontcares = [], [], []
        total_num_valid_gt = 0
        for i in range(len(self.gt_annos)):
            rets = self._clean_data(self.gt_annos[i], self.dt_annos[i], current_class, difficulty)
            num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets
            ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
            ignored_dets.append(np.array(ignored_det, dtype=np.int64))
            if len(dc_bboxes) == 0:
                # No DontCare boxes in this frame: keep a typed empty array.
                dc_bboxes = np.zeros((0, 4)).astype(np.float64)
            else:
                dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
            total_dc_num.append(dc_bboxes.shape[0])
            dontcares.append(dc_bboxes)
            total_num_valid_gt += num_valid_gt
            # Per-frame matrices: GT rows are [bbox(4), alpha]; detection
            # rows are [bbox(4), alpha, score].
            gt_datas = np.concatenate(
                [self.gt_annos[i]['bbox'], self.gt_annos[i]['alpha'][..., np.newaxis]], 1)
            dt_datas = np.concatenate([
                self.dt_annos[i]['bbox'], self.dt_annos[i]['alpha'][..., np.newaxis],
                self.dt_annos[i]['score'][..., np.newaxis]
            ], 1)
            gt_datas_list.append(gt_datas)
            dt_datas_list.append(dt_datas)
        total_dc_num = np.stack(total_dc_num, axis=0)
        return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt)
@staticmethod
def _clean_data(gt_anno, dt_anno, current_class, difficulty):
CLASS_NAMES = ['car', 'pedestrian', 'cyclist']
MIN_HEIGHT = [40, 25, 25]
MAX_OCCLUSION = [0, 1, 2]
MAX_TRUNCATION = [0.15, 0.3, 0.5]
dc_bboxes, ignored_gt, ignored_dt = [], [], []
current_cls_name = CLASS_NAMES[current_class].lower()
num_gt = len(gt_anno['name'])
num_dt = len(dt_anno['name'])
num_valid_gt = 0
for i in range(num_gt):
bbox = gt_anno['bbox'][i]
gt_name = gt_anno['name'][i].lower()
height = bbox[3] - bbox[1]
valid_class = -1
if (gt_name == current_cls_name):
valid_class = 1
elif (current_cls_name == 'Pedestrian'.lower()
and 'Person_sitting'.lower() == gt_name):
valid_class = 0
elif (current_cls_name == 'Car'.lower() and 'Van'.lower() == gt_name):
valid_class = 0
else:
valid_class = -1
ignore = False
if ((gt_anno['occluded'][i] > MAX_OCCLUSION[difficulty])
or (gt_anno['truncated'][i] > MAX_TRUNCATION[difficulty])
or (height <= MIN_HEIGHT[difficulty])):
ignore = True
if valid_class == 1 and not ignore:
ignored_gt.append(0)
num_valid_gt += 1
elif (valid_class == 0 or (ignore and (valid_class == 1))):
ignored_gt.append(1)
else:
ignored_gt.append(-1)
# for i in range(num_gt):
if gt_anno['name'][i] == 'DontCare':
dc_bboxes.append(gt_anno['bbox'][i])
for i in range(num_dt):
if (dt_anno['name'][i].lower() == current_cls_name):
| |
# repo: ericphanson/arxiv-search
import os
import json
import time
import pickle
import argparse
import dateutil.parser
from dateutil.tz import tzutc
from datetime import datetime, timedelta
from pytz import timezone
import copy
from random import shuffle, randrange, uniform
from flask.json import jsonify
from sqlite3 import dbapi2 as sqlite3
from hashlib import md5
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
from flask_limiter import Limiter
from werkzeug import check_password_hash, generate_password_hash
import re
from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch.helpers import streaming_bulk, bulk, parallel_bulk
import elasticsearch
from itertools import islice
import certifi
from elasticsearch_dsl import Search, Q, A, Mapping
from elasticsearch_dsl import FacetedSearch, TermsFacet, RangeFacet, DateHistogramFacet
from elasticsearch_dsl.query import MultiMatch, Match, DisMax
from aws_requests_auth.aws_auth import AWSRequestsAuth
from cmreslogging.handlers import CMRESHandler
import requests
from requests_aws4auth import AWS4Auth
from pyparsing import Word, alphas, Literal, Group, Suppress, OneOrMore, oneOf
import threading
# -----------------------------------------------------------------------------
# Tokens ignored when tokenizing titles/abstracts.
# NOTE(review): "to" appears twice in this list — harmless but redundant.
stop_words = ["the", "of", "and", "in", "a", "to", "we", "for", "mathcal", "can", "is", "this", "with", "by", "that", "as", "to"]
root_dir = os.path.join(".")
# Path helpers for the three on-disk areas: secret keys, server data, shared data.
def key_dir(file): return os.path.join(root_dir,"server","keys",file)
def server_dir(file): return os.path.join(root_dir,"server", file)
def shared_dir(file): return os.path.join(root_dir,"shared", file)
database_path = os.path.join(root_dir,"server", 'user_db', 'as.db')
schema_path = os.path.join(root_dir,"server", 'user_db', 'schema.sql')
def strip_version(idstr):
    """ identity function if arxiv id has no version, otherwise strips it. """
    # Only strip an explicit trailing "v<digits>" marker. The previous
    # split('v') corrupted old-style ids whose category contains a 'v',
    # e.g. 'solv-int/9701001' -> 'sol'.
    return re.sub(r'v\d+$', '', idstr)
# "1511.08198v1" is an example of a valid arxiv id that we accept
def isvalidid(pid):
    """Return a match object when *pid* looks like an arXiv id, else None."""
    # Raw string: '\d' in a plain literal is an invalid escape sequence and
    # warns on modern Python.
    return re.match(r'^([a-z]+(-[a-z]+)?/)?\d+(\.\d+)?(v\d+)?$', pid)
# database configuration
if os.path.isfile(key_dir('secret_key.txt')):
    # Use context managers so key-file handles are closed promptly
    # (the previous open(...).read() calls leaked file objects).
    with open(key_dir('secret_key.txt'), 'r') as f:
        SECRET_KEY = f.read()
else:
    SECRET_KEY = 'devkey, should be in a file'
# AWS_ACCESS_KEY = open(key_dir('AWS_ACCESS_KEY.txt'), 'r').read().strip()
# AWS_SECRET_KEY = open(key_dir('AWS_SECRET_KEY.txt'), 'r').read().strip()
with open(key_dir('ES_USER.txt'), 'r') as f:
    ES_USER = f.read().strip()
with open(key_dir('ES_PASS.txt'), 'r') as f:
    ES_PASS = f.read().strip()
# (was a duplicated "es_host = es_host = ..." assignment)
es_host = '0638598f91a536280b20fd25240980d2.us-east-1.aws.found.io'
# log_AWS_ACCESS_KEY = open(key_dir('log_AWS_ACCESS_KEY.txt'), 'r').read().strip()
# log_AWS_SECRET_KEY = open(key_dir('log_AWS_SECRET_KEY.txt'), 'r').read().strip()
CLOUDFRONT_URL = 'https://d3dq07j9ipgft2.cloudfront.net/'
with open(shared_dir("all_categories.json"), 'r') as cats:
    CATS_JSON = json.load(cats)
ALL_CATEGORIES = [ cat['c'] for cat in CATS_JSON]
# jwskey = jwk.JWK.generate(kty='oct', size=256)
with open(key_dir('cache_key.txt'), 'r') as f:
    cache_key = f.read().strip()
AUTO_CACHE = False
user_features = True
user_interactivity = False
print('read in AWS keys')
app = Flask(__name__, static_folder=os.path.join("..","static"))
app.config.from_object(__name__)
# limiter = Limiter(app, default_limits=["1000 per hour", "20 per minute"])
# -----------------------------------------------------------------------------
# utilities for database interactions
# -----------------------------------------------------------------------------
# to initialize the database: sqlite3 as.db < schema.sql
def connect_db():
    """Open a connection to the user database; rows come back dict-like."""
    conn = sqlite3.connect(database_path)
    conn.row_factory = sqlite3.Row  # dict-style access instead of tuples
    return conn
def query_db(query, args=(), one=False):
    """Queries the database and returns a list of dictionaries.

    With one=True, returns just the first row, or None when the query
    produced no rows. Uses the per-request connection on flask.g.
    """
    cur = g.db.execute(query, args)
    rv = cur.fetchall()
    return (rv[0] if rv else None) if one else rv
def get_user_id(username):
    """Convenience method to look up the id for a username."""
    row = query_db('select user_id from user where username = ?',
                   [username], one=True)
    return row[0] if row else None
def get_username(user_id):
    """Convenience method to look up the username for a user."""
    row = query_db('select username from user where user_id = ?',
                   [user_id], one=True)
    return row[0] if row else None
# -----------------------------------------------------------------------------
# connection handlers
# -----------------------------------------------------------------------------
@app.before_request
def before_request():
    """Per-request setup: open a DB handle and load the current user plus
    their library ids onto flask.g."""
    g.libids = None
    # this will always request database connection, even if we dont end up using it ;\
    g.db = connect_db()
    # retrieve user object from the database if user_id is set
    g.user = None
    if 'user_id' in session:
        g.user = query_db('select * from user where user_id = ?',
                          [session['user_id']], one=True)
        added = addUserSearchesToCache()
        if added:
            print('addUser fired from before_request')
    # g.libids = None
    if g.user:
        # Prefer the session-cached library ids; rebuild them on a miss.
        if 'libids' in session:
            g.libids = session['libids']
        else:
            update_libids()
def update_libids():
    """Rebuild the current user's library id list (versions stripped) and
    cache it on both the session and flask.g."""
    uid = session['user_id']
    user_library = query_db('''select * from library where user_id = ?''', [uid])
    libids = [strip_version(x['paper_id']) for x in user_library]
    session['libids'] = libids
    g.libids = libids
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection opened in before_request."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
#------------------------------------------------------
# Pass data to client
#------------------------------------------------------
def render_date(timestr):
    """Format an ISO-ish timestamp string as e.g. '3 Oct 2018'."""
    timestruct = dateutil.parser.parse(timestr)
    rendered_str = '%s %s %s' % (timestruct.day, timestruct.strftime('%b'), timestruct.year)
    return rendered_str
def encode_hit(p, send_images=True, send_abstracts=True):
    """Convert one search hit *p* (a dict-like paper record) into the JSON
    structure the client renders.

    NOTE(review): send_abstracts is accepted but never consulted — the
    abstract is included whenever present. Confirm whether that's intended.
    """
    pid = str(p['rawid'])
    idvv = '%sv%d' % (p['rawid'], p['paper_version'])  # versioned id, e.g. 1511.08198v1
    struct = {}
    if 'havethumb' in p:
        struct['havethumb'] = p['havethumb']
    struct['title'] = p['title']
    struct['pid'] = idvv
    struct['rawpid'] = p['rawid']
    struct['category'] = p['primary_cat']
    struct['authors'] = [a for a in p['authors']]
    struct['link'] = p['link']
    if 'abstract' in p:
        struct['abstract'] = p['abstract']
    # print(p.to_dict())
    # exit()
    if send_images:
        # struct['img'] = '/static/thumbs/' + idvv.replace('/','') + '.pdf.jpg'
        struct['img'] = CLOUDFRONT_URL + 'thumbs/' + pid.replace('/','') + '.pdf.jpg'
    struct['tags'] = [t for t in p['cats']]
    # struct['tags'] = [t['term'] for t in p['tags']]
    # render time information nicely
    struct['published_time'] = render_date(p['updated'])
    struct['originally_published_time'] = render_date(p['published'])
    # fetch amount of discussion on this paper
    struct['num_discussion'] = 0
    # arxiv comments from the authors (when they submit the paper)
    # cc = p.get('arxiv_comment', '')
    if 'arxiv_comment' in p:
        cc = p['arxiv_comment']
    else:
        cc = ""
    if len(cc) > 100:
        cc = cc[:100] + '...' # crop very long comments
    struct['comment'] = cc
    return struct
def add_user_data_to_hit(struct):
    """Annotate a hit with whether it's in the current user's library."""
    library = set(g.libids) if g.libids else set()
    struct['in_library'] = 1 if struct['rawpid'] in library else 0
    return struct
def getResults(search):
    """Execute (or serve from cache) an elasticsearch-dsl Search and return
    (records, meta): the hydrated hit documents plus response metadata.

    Query results are cached by a hash of the query dict; hit documents are
    hydrated from the shared cached_docs store. Both caches are guarded by
    their own locks.
    """
    search_dict = search.to_dict()
    query_hash = make_hash(search_dict)
    print(query_hash)
    # query_hash = 0
    have = False
    with cached_queries_lock:
        if query_hash in cached_queries:
            d = cached_queries[query_hash]
            list_of_ids = d["list_of_ids"]
            meta = d["meta"]
            have = True
    # temp disable caching
    # print("remember, caching disabled for testing")
    # have = False
    if not have:
        # Cache miss: hit elasticsearch and populate the query cache.
        es_response = search.execute()
        meta = get_meta_from_response(es_response)
        list_of_ids = process_query_to_cache(query_hash, es_response, meta)
    with cached_docs_lock:
        records = []
        for _id in list_of_ids:
            doc = cached_docs[_id]
            if list_of_ids[_id]:
                # Per-query extras: relevance score and explain text.
                if "score" in list_of_ids[_id]:
                    doc.update({'score' : list_of_ids[_id]["score"]})
                if "explain_sentence" in list_of_ids[_id]:
                    doc.update({'explain_sentence' : list_of_ids[_id]["explain_sentence"]})
            records.append(doc)
    records = [add_user_data_to_hit(r) for r in records]
    return records, meta
# def test_hash_speed():
# {'size': 10, 'query': {'match_all': {}}, 'sort': [{'updated': {'order': 'desc'}}], 'from': 0}
# -----------------------------------------------------------------------------
# Build and filter query
# -----------------------------------------------------------------------------
def cat_filter(groups_of_cats):
    """Build a category filter: AND across groups, OR within each group."""
    filt_q = Q()
    for group in groups_of_cats:
        if len(group) == 1:
            filt_q &= Q('term', cats=group[0])
        elif len(group) > 1:
            # OR together the categories inside this group.
            filt_q &= Q('terms', cats=group)
    return filt_q
def prim_filter(prim_cat):
    """Filter on the primary category, unless 'any' was requested."""
    if prim_cat == "any":
        return Q()
    return Q('term', primary_cat=prim_cat)
def time_filter(time):
    """Build a time filter: "all" (none), a named window, or an explicit
    {'start': ..., 'end': ...} range on the `updated` field."""
    filt_q = Q()
    if time == "all":
        return filt_q
    if time in ("3days", "week", "day", "month", "year"):
        return filt_q & getTimeFilterQuery(time)
    filt_q &= Q('range', updated={'gte': time['start'] })
    filt_q &= Q('range', updated={'lte': time['end'] })
    return filt_q
def ver_filter(v1):
    """Restrict to first-version papers when v1 is truthy."""
    filt = Q()
    if v1:
        filt = filt & Q('term', paper_version=1)
    return filt
def lib_filter(only_lib):
    """When only_lib is truthy, restrict results to the papers in the
    current user's library (matched by document id)."""
    filt_q = Q()
    if only_lib:
        # filt_q = Q('ids', type="paper", values= papers_from_library())
        pids = ids_from_library()
        if pids:
            filt_q = Q('bool', filter=[Q('terms', _id=pids)])
        # filt_q = filt_q & Q('term', paper_version=1)
    return filt_q
def extract_query_params(query_info):
query_info = sanitize_query_object(query_info)
search = Search(using=es, index='arxiv_pointer')
tune_dict = None
weights = None
pair_fields = None
if 'rec_tuning' in query_info:
if query_info['rec_tuning'] is not None:
rec_tuning = query_info['rec_tuning']
weights = rec_tuning.pop('weights', None)
pair_fields= rec_tuning.pop('pair_fields', None)
tune_dict = rec_tuning
# add query
auth_query = None
query_text = None
sim_to_ids = None
rec_lib = False
bad_search = False
lib_ids = ids_from_library()
if 'rec_lib' in query_info:
rec_lib = query_info['rec_lib']
if query_info['query'].strip() != '':
query_text = query_info['query'].strip()
if 'author' in query_info:
if query_info['author'].strip() != '':
auth_query = query_info['author'].strip()
if 'sim_to' in query_info:
sim_to_ids = query_info['sim_to']
if (not sim_to_ids) and (not rec_lib):
search = search.extra(explain=True)
queries = []
# if query_text:
# queries.append(get_simple_search_query(query_text, weights = weights))
if rec_lib:
if lib_ids:
queries.append(get_sim_to_query(lib_ids, tune_dict = tune_dict, weights = weights))
else:
bad_search = True
if sim_to_ids:
queries.append(get_sim_to_query(sim_to_ids, tune_dict = tune_dict, weights = weights))
if query_text:
# search = search.sort('_score')
# print("sorting by score")
if len(queries) > 0:
q = Q("bool", must=get_simple_search_query(query_text), should = queries)
search = search.query(q)
else:
q = get_simple_search_query(query_text)
search = search.query(q)
else:
if len(queries) > 0:
if len(queries) == 1:
q = queries[0]
else:
q = Q("bool", should = queries)
search = search.query(q)
else:
search = search.sort('-updated')
# if rec_lib and lib_ids:
# re_q = get_sim_to_query(lib_ids, tune_dict = {'max_query_terms' : 25, 'minimum_should_match': '1%'})
# search = search.extra(rescore={'window_size': 100, "query": {"rescore_query": re_q.to_dict()}})
# print('%s queries' % len(queries))
# if not (queries):
# search = search.sort('-updated')
# elif len(queries)==1:
# print(queries)
# q = queries[0]
# search = search.query(q)
# elif len(queries)>1:
# if query_text:
# q = Q("bool", must=get_simple_search_query(query_text), should = queries, disable_coord =True)
# search = search.query(q)
# print('search dict:')
# print(search.to_dict())
# get filters
Q_lib = Q()
if 'only_lib' in query_info:
Q_lib = lib_filter(query_info['only_lib'])
# print('lib_ids = %s' % lib_ids)
if query_info['only_lib'] | |
3},
'Putnam': {'pop': 21218, 'tracts': 5},
'Quitman': {'pop': 2513, 'tracts': 1},
'Rabun': {'pop': 16276, 'tracts': 5},
'Randolph': {'pop': 7719, 'tracts': 2},
'Richmond': {'pop': 200549, 'tracts': 47},
'Rockdale': {'pop': 85215, 'tracts': 15},
'Schley': {'pop': 5010, 'tracts': 2},
'Screven': {'pop': 14593, 'tracts': 5},
'Seminole': {'pop': 8729, 'tracts': 3},
'Spalding': {'pop': 64073, 'tracts': 12},
'Stephens': {'pop': 26175, 'tracts': 5},
'Stewart': {'pop': 6058, 'tracts': 2},
'Sumter': {'pop': 32819, 'tracts': 8},
'Talbot': {'pop': 6865, 'tracts': 3},
'Taliaferro': {'pop': 1717, 'tracts': 1},
'Tattnall': {'pop': 25520, 'tracts': 5},
'Taylor': {'pop': 8906, 'tracts': 3},
'Telfair': {'pop': 16500, 'tracts': 3},
'Terrell': {'pop': 9315, 'tracts': 4},
'Thomas': {'pop': 44720, 'tracts': 11},
'Tift': {'pop': 40118, 'tracts': 9},
'Toombs': {'pop': 27223, 'tracts': 6},
'Towns': {'pop': 10471, 'tracts': 3},
'Treutlen': {'pop': 6885, 'tracts': 2},
'Troup': {'pop': 67044, 'tracts': 14},
'Turner': {'pop': 8930, 'tracts': 2},
'Twiggs': {'pop': 9023, 'tracts': 2},
'Union': {'pop': 21356, 'tracts': 6},
'Upson': {'pop': 27153, 'tracts': 7},
'Walker': {'pop': 68756, 'tracts': 13},
'Walton': {'pop': 83768, 'tracts': 15},
'Ware': {'pop': 36312, 'tracts': 9},
'Warren': {'pop': 5834, 'tracts': 2},
'Washington': {'pop': 21187, 'tracts': 5},
'Wayne': {'pop': 30099, 'tracts': 6},
'Webster': {'pop': 2799, 'tracts': 2},
'Wheeler': {'pop': 7421, 'tracts': 2},
'White': {'pop': 27144, 'tracts': 5},
'Whitfield': {'pop': 102599, 'tracts': 18},
'Wilcox': {'pop': 9255, 'tracts': 4},
'Wilkes': {'pop': 10593, 'tracts': 4},
'Wilkinson': {'pop': 9563, 'tracts': 3},
'Worth': {'pop': 21679, 'tracts': 5}},
'HI': {'Hawaii': {'pop': 185079, 'tracts': 34},
'Honolulu': {'pop': 953207, 'tracts': 244},
'Kalawao': {'pop': 90, 'tracts': 1},
'Kauai': {'pop': 67091, 'tracts': 16},
'Maui': {'pop': 154834, 'tracts': 37}},
'IA': {'Adair': {'pop': 7682, 'tracts': 3},
'Adams': {'pop': 4029, 'tracts': 2},
'Allamakee': {'pop': 14330, 'tracts': 5},
'Appanoose': {'pop': 12887, 'tracts': 5},
'Audubon': {'pop': 6119, 'tracts': 3},
'Benton': {'pop': 26076, 'tracts': 7},
'<NAME>': {'pop': 131090, 'tracts': 38},
'Boone': {'pop': 26306, 'tracts': 7},
'Bremer': {'pop': 24276, 'tracts': 8},
'Buchanan': {'pop': 20958, 'tracts': 6},
'<NAME>': {'pop': 20260, 'tracts': 6},
'Butler': {'pop': 14867, 'tracts': 5},
'Calhoun': {'pop': 9670, 'tracts': 4},
'Carroll': {'pop': 20816, 'tracts': 6},
'Cass': {'pop': 13956, 'tracts': 5},
'Cedar': {'pop': 18499, 'tracts': 5},
'<NAME>': {'pop': 44151, 'tracts': 11},
'Cherokee': {'pop': 12072, 'tracts': 4},
'Chickasaw': {'pop': 12439, 'tracts': 4},
'Clarke': {'pop': 9286, 'tracts': 3},
'Clay': {'pop': 16667, 'tracts': 4},
'Clayton': {'pop': 18129, 'tracts': 6},
'Clinton': {'pop': 49116, 'tracts': 12},
'Crawford': {'pop': 17096, 'tracts': 5},
'Dallas': {'pop': 66135, 'tracts': 15},
'Davis': {'pop': 8753, 'tracts': 2},
'Decatur': {'pop': 8457, 'tracts': 3},
'Delaware': {'pop': 17764, 'tracts': 4},
'<NAME>': {'pop': 40325, 'tracts': 11},
'Dickinson': {'pop': 16667, 'tracts': 5},
'Dubuque': {'pop': 93653, 'tracts': 26},
'Emmet': {'pop': 10302, 'tracts': 4},
'Fayette': {'pop': 20880, 'tracts': 7},
'Floyd': {'pop': 16303, 'tracts': 5},
'Franklin': {'pop': 10680, 'tracts': 3},
'Fremont': {'pop': 7441, 'tracts': 3},
'Greene': {'pop': 9336, 'tracts': 4},
'Grundy': {'pop': 12453, 'tracts': 4},
'Guthrie': {'pop': 10954, 'tracts': 3},
'Hamilton': {'pop': 15673, 'tracts': 5},
'Hancock': {'pop': 11341, 'tracts': 4},
'Hardin': {'pop': 17534, 'tracts': 6},
'Harrison': {'pop': 14928, 'tracts': 5},
'Henry': {'pop': 20145, 'tracts': 5},
'Howard': {'pop': 9566, 'tracts': 3},
'Humboldt': {'pop': 9815, 'tracts': 4},
'Ida': {'pop': 7089, 'tracts': 3},
'Iowa': {'pop': 16355, 'tracts': 4},
'Jackson': {'pop': 19848, 'tracts': 6},
'Jasper': {'pop': 36842, 'tracts': 9},
'Jefferson': {'pop': 16843, 'tracts': 4},
'Johnson': {'pop': 130882, 'tracts': 24},
'Jones': {'pop': 20638, 'tracts': 5},
'Keokuk': {'pop': 10511, 'tracts': 4},
'Kossuth': {'pop': 15543, 'tracts': 6},
'Lee': {'pop': 35862, 'tracts': 11},
'Linn': {'pop': 211226, 'tracts': 45},
'Louisa': {'pop': 11387, 'tracts': 3},
'Lucas': {'pop': 8898, 'tracts': 4},
'Lyon': {'pop': 11581, 'tracts': 3},
'Madison': {'pop': 15679, 'tracts': 3},
'Mahaska': {'pop': 22381, 'tracts': 7},
'Marion': {'pop': 33309, 'tracts': 8},
'Marshall': {'pop': 40648, 'tracts': 10},
'Mills': {'pop': 15059, 'tracts': 5},
'Mitchell': {'pop': 10776, 'tracts': 3},
'Monona': {'pop': 9243, 'tracts': 4},
'Monroe': {'pop': 7970, 'tracts': 3},
'Montgomery': {'pop': 10740, 'tracts': 4},
'Muscatine': {'pop': 42745, 'tracts': 10},
"O'Brien": {'pop': 14398, 'tracts': 4},
'Osceola': {'pop': 6462, 'tracts': 2},
'Page': {'pop': 15932, 'tracts': 6},
'<NAME>': {'pop': 9421, 'tracts': 4},
'Plymouth': {'pop': 24986, 'tracts': 6},
'Pocahontas': {'pop': 7310, 'tracts': 3},
'Polk': {'pop': 430640, 'tracts': 98},
'Pottawattamie': {'pop': 93158, 'tracts': 30},
'Poweshiek': {'pop': 18914, 'tracts': 5},
'Ringgold': {'pop': 5131, 'tracts': 2},
'Sac': {'pop': 10350, 'tracts': 4},
'Scott': {'pop': 165224, 'tracts': 47},
'Shelby': {'pop': 12167, 'tracts': 4},
'Sioux': {'pop': 33704, 'tracts': 7},
'Story': {'pop': 89542, 'tracts': 20},
'Tama': {'pop': 17767, 'tracts': 6},
'Taylor': {'pop': 6317, 'tracts': 3},
'Union': {'pop': 12534, 'tracts': 4},
'<NAME>': {'pop': 7570, 'tracts': 2},
'Wapello': {'pop': 35625, 'tracts': 11},
'Warren': {'pop': 46225, 'tracts': 12},
'Washington': {'pop': 21704, 'tracts': 5},
'Wayne': {'pop': 6403, 'tracts': 3},
'Webster': {'pop': 38013, 'tracts': 12},
'Winnebago': {'pop': 10866, 'tracts': 3},
'Winneshiek': {'pop': 21056, 'tracts': 5},
'Woodbury': {'pop': 102172, 'tracts': 26},
'Worth': {'pop': 7598, 'tracts': 3},
'Wright': {'pop': 13229, 'tracts': 5}},
'ID': {'Ada': {'pop': 392365, 'tracts': 59},
'Adams': {'pop': 3976, 'tracts': 2},
'Bannock': {'pop': 82839, 'tracts': 22},
'<NAME>': {'pop': 5986, 'tracts': 2},
'Benewah': {'pop': 9285, 'tracts': 2},
'Bingham': {'pop': 45607, 'tracts': 8},
'Blaine': {'pop': 21376, 'tracts': 4},
'Boise': {'pop': 7028, 'tracts': 1},
'Bonner': {'pop': 40877, 'tracts': 9},
'Bonneville': {'pop': 104234, 'tracts': 21},
'Boundary': {'pop': 10972, 'tracts': 2},
'Butte': {'pop': 2891, 'tracts': 1},
'Camas': {'pop': 1117, 'tracts': 1},
'Canyon': {'pop': 188923, 'tracts': 29},
'Caribou': {'pop': 6963, 'tracts': 2},
'Cassia': {'pop': 22952, 'tracts': 6},
'Clark': {'pop': 982, 'tracts': 1},
'Clearwater': {'pop': 8761, 'tracts': 2},
'Custer': {'pop': 4368, 'tracts': 1},
'Elmore': {'pop': 27038, 'tracts': 5},
'Franklin': {'pop': 12786, 'tracts': 2},
'Fremont': {'pop': 13242, 'tracts': 3},
'Gem': {'pop': 16719, 'tracts': 3},
'Gooding': {'pop': 15464, 'tracts': 2},
'Idaho': {'pop': 16267, 'tracts': 5},
'Jefferson': {'pop': 26140, 'tracts': 4},
'Jerome': {'pop': 22374, 'tracts': 5},
'Kootenai': {'pop': 138494, 'tracts': 25},
'Latah': {'pop': 37244, 'tracts': 7},
'Lemhi': {'pop': 7936, 'tracts': 3},
'Lewis': {'pop': 3821, 'tracts': 3},
'Lincoln': {'pop': 5208, 'tracts': 1},
'Madison': {'pop': 37536, 'tracts': 6},
'Minidoka': {'pop': 20069, 'tracts': 5},
'<NAME>': {'pop': 39265, 'tracts': 10},
'Oneida': {'pop': 4286, 'tracts': 1},
'Owyhee': {'pop': 11526, 'tracts': 3},
'Payette': {'pop': 22623, 'tracts': 4},
'Power': {'pop': 7817, 'tracts': 2},
'Shoshone': {'pop': 12765, 'tracts': 3},
'Teton': {'pop': 10170, 'tracts': 1},
'<NAME>': {'pop': 77230, 'tracts': 14},
'Valley': {'pop': 9862, 'tracts': 3},
'Washington': {'pop': 10198, 'tracts': 3}},
'IL': {'Adams': {'pop': 67103, 'tracts': 18},
'Alexander': {'pop': 8238, 'tracts': 4},
'Bond': {'pop': 17768, 'tracts': 4},
'Boone': {'pop': 54165, 'tracts': 7},
'Brown': {'pop': 6937, 'tracts': 2},
'Bureau': {'pop': 34978, 'tracts': 10},
'Calhoun': {'pop': 5089, 'tracts': 2},
'Carroll': {'pop': 15387, 'tracts': 6},
'Cass': {'pop': 13642, 'tracts': 5},
'Champaign': {'pop': 201081, 'tracts': 43},
'Christian': {'pop': 34800, 'tracts': 10},
'Clark': {'pop': 16335, 'tracts': 4},
'Clay': {'pop': 13815, 'tracts': 4},
'Clinton': {'pop': 37762, 'tracts': 8},
'Coles': {'pop': 53873, 'tracts': 12},
'Cook': {'pop': 5194675, 'tracts': 1318},
'Crawford': {'pop': 19817, 'tracts': 6},
'Cumberland': {'pop': 11048, 'tracts': 3},
'<NAME>': {'pop': 16561, 'tracts': 5},
'DeKalb': {'pop': 105160, 'tracts': 21},
'Douglas': {'pop': 19980, 'tracts': 5},
'DuPage': {'pop': 916924, 'tracts': 216},
'Edgar': {'pop': 18576, 'tracts': 5},
'Edwards': {'pop': 6721, 'tracts': 3},
'Effingham': {'pop': 34242, 'tracts': 8},
'Fayette': {'pop': 22140, 'tracts': 7},
'Ford': {'pop': 14081, 'tracts': 5},
'Franklin': {'pop': 39561, 'tracts': 12},
'Fulton': {'pop': 37069, 'tracts': 12},
'Gallatin': {'pop': 5589, 'tracts': 2},
'Greene': {'pop': 13886, 'tracts': 5},
'Grundy': {'pop': 50063, 'tracts': 10},
'Hamilton': {'pop': 8457, 'tracts': 3},
'Hancock': {'pop': 19104, 'tracts': 7},
'Hardin': {'pop': 4320, 'tracts': 2},
'Henderson': {'pop': 7331, 'tracts': 3},
'Henry': {'pop': 50486, 'tracts': 13},
'Iroquois': {'pop': 29718, 'tracts': 9},
'Jackson': {'pop': 60218, 'tracts': 14},
'Jasper': {'pop': 9698, 'tracts': 3},
'Jefferson': {'pop': 38827, 'tracts': 11},
'Jersey': {'pop': 22985, 'tracts': 6},
'<NAME>': {'pop': 22678, 'tracts': 6},
'Johnson': {'pop': 12582, 'tracts': 4},
'Kane': {'pop': 515269, 'tracts': 82},
'Kankakee': {'pop': 113449, 'tracts': 29},
'Kendall': {'pop': 114736, 'tracts': 10},
'Knox': {'pop': 52919, 'tracts': 16},
'<NAME>': {'pop': 113924, 'tracts': 28},
'Lake': {'pop': 703462, 'tracts': 153},
'Lawrence': {'pop': 16833, 'tracts': 5},
'Lee': {'pop': 36031, 'tracts': 9},
'Livingston': {'pop': 38950, 'tracts': 10},
'Logan': {'pop': 30305, 'tracts': 8},
'Macon': {'pop': 110768, 'tracts': 34},
'Macoupin': {'pop': 47765, 'tracts': 13},
'Madison': {'pop': 269282, 'tracts': 61},
'Marion': {'pop': 39437, 'tracts': 12},
'Marshall': {'pop': 12640, 'tracts': 5},
'Mason': {'pop': 14666, 'tracts': 6},
'Massac': {'pop': 15429, 'tracts': 4},
'McDonough': {'pop': 32612, 'tracts': 10},
'McHenry': {'pop': 308760, 'tracts': 52},
'McLean': {'pop': 169572, 'tracts': 41},
| |
# -*- coding: utf-8 -*-
import os
import time
import numpy as np
from pathlib import Path
import h5py
from tqdm import tqdm
class H5DataSet:
    """
    Represents an abstract HDF5 dataset.

    This class allows the user to read headers and sensor data from hdf5 files
    into numpy arrays.

    Input params:
        data_folder_path: folder holding the data; npy output is written to a
            "npy_data" subfolder of it.
        file_path: path of the hdf5 file to read.
        min_sequence_length: number of leading time steps kept per simulation.
        target_sequence_length: required length of a series (after skipping the
            first 1001 samples) for the simulation to be included.
    """

    def __init__(self, data_folder_path, file_path,
                 min_sequence_length, target_sequence_length):
        self.data_folder_path = data_folder_path
        self.file_path = file_path
        self.min_sequence_length = min_sequence_length
        self.target_sequence_length = target_sequence_length
        # fail fast when the supplied data folder does not exist
        assert(Path(self.data_folder_path).is_dir())

    def get_headers(self):
        """Return the top-level keys of the hdf5 file."""
        with h5py.File(self.file_path, "r") as f:
            headers = list(f.keys())
        return headers

    def get_weight_configs(self):
        """Return the weight-configuration keys stored under the "data" group."""
        with h5py.File(self.file_path, "r") as f:
            data = f.get("data")
            weight_configurations = list(data.keys())
        return weight_configurations

    def get_sensor_readings(self, npy_file_name, save_npy,
                            downsample_factor, zone_ice):
        """
        This method stacks the 27 sensor readings at each weight configuration.
        It gets only those time series with at least "length_time_series"
        no. of points.

        Args:
            npy_file_name: file name used when saving the stacked array.
            save_npy: when truthy, save the stacked array under
                "<data_folder_path>/npy_data/<npy_file_name>".
            downsample_factor: keep every k-th configuration from the normal
                set; abnormal configurations always pass the filter (see the
                NOTE on operator precedence below).
            zone_ice: which ice zone (1, 2 or 3) defines the abnormal configs.

        Returns: tuple (np array, list or None)
            sensor_columns_data with shape (n, m)
                n: total time steps (9601 for each weight configuration)
                m: measured sensor variables
            and the full list of weight configurations when downsample_factor
            is truthy, otherwise None.
        """
        with h5py.File(self.file_path, "r") as f:
            data = f.get("data")
            weight_configurations = list(data.keys())
            # stack sensor readings for weight configs
            sensor_columns_data = []
            # define normal and abnormal configurations (2 class anomaly detection problem)
            normal_configs = ["_0.0-0.0-0.0"]
            # define abnormal configs based on specified zone
            if zone_ice == 3:
                abnormal_configs = ['_0.0-0.0-0.4', '_0.0-0.0-0.8', '_0.0-0.0-1.2',
                                    '_0.0-0.0-1.6', '_0.0-0.0-2.0', '_0.0-0.0-2.4',
                                    '_0.0-0.0-2.8', '_0.0-0.0-3.2', '_0.0-0.0-3.6',
                                    '_0.0-0.0-4.0']
            elif zone_ice == 2:
                abnormal_configs = ['_0.0-0.4-0.0', '_0.0-0.8-0.0', '_0.0-1.2-0.0',
                                    '_0.0-1.6-0.0', '_0.0-2.0-0.0', '_0.0-2.4-0.0',
                                    '_0.0-2.8-0.0', '_0.0-3.2-0.0', '_0.0-3.6-0.0',
                                    '_0.0-4.0-0.0']
            else:
                abnormal_configs = ['_0.4-0.0-0.0', '_0.8-0.0-0.0', '_1.2-0.0-0.0',
                                    '_1.6-0.0-0.0', '_2.0-0.0-0.0', '_2.4-0.0-0.0',
                                    '_2.8-0.0-0.0', '_3.2-0.0-0.0', '_3.6-0.0-0.0',
                                    '_4.0-0.0-0.0']
            start = time.time()
            print("-----------------------------------------------------------")
            print(">>>Reading sensor columns (downsample factor {0})<<<".format(downsample_factor))
            for i in tqdm(range(0, len(weight_configurations))):
                # NOTE(review): precedence here is (downsampled AND normal) OR
                # abnormal, i.e. abnormal configs bypass the downsampling —
                # confirm this is the intended behavior.
                if i % downsample_factor == 0 and weight_configurations[i] in normal_configs \
                        or weight_configurations[i] in abnormal_configs:
                    weight_k = data.get(weight_configurations[i])
                    values_weight_k = np.array(weight_k)  # cast hdf5 into numpy array
                    # keep only series of exactly the target length (after the
                    # first 1001 warm-up samples)
                    if values_weight_k[1001::,].shape[0] == self.target_sequence_length:
                        print("{}-th weight configuration".format(i), weight_configurations[i])
                        print("current {}-th weight configuration:".format(i), values_weight_k.shape)
                        print("saving {}-th weight configuration".format(i))
                        print("target sequence length:", self.target_sequence_length)
                        print("time series shape:", values_weight_k.shape)
                        print()
                        # append first min_sequence_length data points for each simulation
                        sensor_columns_data.append(values_weight_k[0:self.min_sequence_length,])
                        # consume each abnormal config at most once
                        if weight_configurations[i] in abnormal_configs:
                            abnormal_configs.remove(weight_configurations[i])
                            print("current abnormal configs:", abnormal_configs)
            if not sensor_columns_data:
                # TODO: This workaround is not working as expected
                print("Sensor columns data list empty: NO simulation with target sequence length")
                print("adding random data as a REPLACEMENT")
                sensor_columns_data.append(np.random.rand(11*self.min_sequence_length, 27))
            sensor_columns_data = np.vstack(sensor_columns_data)
            print("sensor columns stacked data:", sensor_columns_data.shape)
            elapsed_time_fl = (time.time() - start)
            print("[INFO] Ellapsed time to stack sensor readings:", elapsed_time_fl)
            print("-----------------")
            # save data into npy format
            if save_npy:
                if not os.path.exists(os.path.join(self.data_folder_path, "npy_data")):
                    os.makedirs(os.path.join(self.data_folder_path, "npy_data"))
                np.save(os.path.join(self.data_folder_path,
                                     "npy_data", npy_file_name),
                        sensor_columns_data)
                print("Saved sensor data into npy format...")
            if downsample_factor:
                return sensor_columns_data, weight_configurations
            else:
                # BUG FIX: the original returned the undefined name `_` here,
                # which raised NameError; return None explicitly instead.
                return sensor_columns_data, None
# class MultiClassH5DataSet:
# """
# Represents an abstract HDF5 dataset.
#
# This class allows the user to read headers and sensor data from hdf5 files
# into numpy arrays.
#
# Input params:
# """
# def __init__(self, data_folder_path, file_path,
# min_sequence_length, target_sequence_length):
#
# self.data_folder_path = data_folder_path
# self.file_path = file_path
# self.min_sequence_length = min_sequence_length
# self.target_sequence_length = target_sequence_length
#
# assert(Path(self.data_folder_path).is_dir())
#
# def get_headers(self):
# with h5py.File(self.file_path, "r") as f:
# headers = list(f.keys())
# return headers
#
# def get_weight_configs(self):
# with h5py.File(self.file_path, "r") as f:
# data = f.get("data")
# weight_configurations = list(data.keys())
# return weight_configurations
#
# def get_sensor_readings(self, npy_file_name, save_npy, downsample_factor):
# """
# This method stacks the 27 sensor readings at each weight configuration.
# It gets only those time series with at least "length_time_series"
# no. of points.
#
# Returns: np array
# sensor_columns_data with shape (n, m)
# n: total time steps (9601 for each weight configuration)
# m: measured sensor variables
# """
# with h5py.File(self.file_path, "r") as f:
#
# data = f.get("data")
# weight_configurations = list(data.keys())
#
# # stack sensor readings for weight configs
# sensor_columns_data = []
# sampled_weight_configurations = []
#
# # define normal and abnormal configurations (2 class anomaly detection problem)
# normal_configs = ["_0.0-0.0-0.0"]
#
# # take abnormal cases from all 3 zones
# abnormal_configs = ["_0.0-0.0-0.2", '_0.0-0.0-0.4', "_0.0-0.0-0.6", '_0.0-0.0-0.8', # zone 3
# "_0.0-0.0-1.0", "_0.0-0.0-1.2", "_0.0-0.0-1.4", "_0.0-0.0-1.6",
# "_0.0-0.0-1.8", '_0.0-0.0-2.0', "_0.0-0.0-2.2", '_0.0-0.0-2.4',
# "_0.0-0.0-2.6", '_0.0-0.0-2.8', "_0.0-0.0-3.0", '_0.0-0.0-3.2',
# "_0.0-0.0-3.4", '_0.0-0.0-3.6', "_0.0-0.0-3.8", '_0.0-0.0-4.0',
# "_0.0-0.2-0.0", '_0.0-0.4-0.0', "_0.0-0.6-0.0", '_0.0-0.8-0.0', # zone 2
# "_0.0-1.0-0.0", '_0.0-1.2-0.0', "_0.0-1.4-0.0", '_0.0-1.6-0.0',
# "_0.0-1.8-0.0", '_0.0-2.0-0.0', "_0.0-2.2-0.0", '_0.0-2.4-0.0',
# "_0.0-2.6-0.0", '_0.0-2.8-0.0', "_0.0-3.0-0.0", '_0.0-3.2-0.0',
# "_0.0-3.4-0.0", '_0.0-3.6-0.0', "_0.0-3.8-0.0", '_0.0-4.0-0.0',
# "_0.2-0.0-0.0", '_0.4-0.0-0.0', "_0.6-0.0-0.0", "_0.8-0.0-0.0", # zone 1
# '_1.0-0.0-0.0', '_1.2-0.0-0.0', "_1.4-0.0-0.0", '_1.6-0.0-0.0',
# "_1.8-0.0-0.0", '_2.0-0.0-0.0', "_2.2-0.0-0.0", '_2.4-0.0-0.0',
# "_2.6-0.0-0.0", '_2.8-0.0-0.0', "_3.0-0.0-0.0", '_3.2-0.0-0.0',
# "_3.4-0.0-0.0", '_3.6-0.0-0.0', "_3.8-0.0-0.0", '_4.0-0.0-0.0']
#
# # NOTE: you are not doing anything with these lists (?)
# abnormal_configs_3 = ["_0.0-0.0-0.2", '_0.0-0.0-0.4', "_0.0-0.0-0.6", '_0.0-0.0-0.8',
# "_0.0-0.0-1.0", "_0.0-0.0-1.2", "_0.0-0.0-1.4", "_0.0-0.0-1.6",
# "_0.0-0.0-1.8", '_0.0-0.0-2.0', "_0.0-0.0-2.2", '_0.0-0.0-2.4',
# "_0.0-0.0-2.6", '_0.0-0.0-2.8', "_0.0-0.0-3.0", '_0.0-0.0-3.2',
# "_0.0-0.0-3.4", '_0.0-0.0-3.6', "_0.0-0.0-3.8", '_0.0-0.0-4.0']
#
# abnormal_configs_2 = ["_0.0-0.2-0.0", '_0.0-0.4-0.0', "_0.0-0.6-0.0", '_0.0-0.8-0.0',
# "_0.0-1.0-0.0", '_0.0-1.2-0.0', "_0.0-1.4-0.0", '_0.0-1.6-0.0',
# "_0.0-1.8-0.0", '_0.0-2.0-0.0', "_0.0-2.2-0.0", '_0.0-2.4-0.0',
# "_0.0-2.6-0.0", '_0.0-2.8-0.0', "_0.0-3.0-0.0", '_0.0-3.2-0.0',
# "_0.0-3.4-0.0", '_0.0-3.6-0.0', "_0.0-3.8-0.0", '_0.0-4.0-0.0']
#
# abnormal_configs_1 = ["_0.2-0.0-0.0", '_0.4-0.0-0.0', "_0.6-0.0-0.0", "_0.8-0.0-0.0",
# '_1.0-0.0-0.0', '_1.2-0.0-0.0', "_1.4-0.0-0.0", '_1.6-0.0-0.0',
# "_1.8-0.0-0.0", '_2.0-0.0-0.0', "_2.2-0.0-0.0", '_2.4-0.0-0.0',
# "_2.6-0.0-0.0", '_2.8-0.0-0.0', "_3.0-0.0-0.0", '_3.2-0.0-0.0',
# "_3.4-0.0-0.0", '_3.6-0.0-0.0', "_3.8-0.0-0.0", '_4.0-0.0-0.0']
#
# start = time.time()
# print("-----------------------------------------------------------")
# print(">>>Reading sensor columns (downsample factor {0})<<<".format(downsample_factor))
# for i in tqdm(range(0, len(weight_configurations))):
# if i % downsample_factor == 0 and weight_configurations[i] in normal_configs \
# or weight_configurations[i] in abnormal_configs:
#
# weight_k = data.get(weight_configurations[i])
# values_weight_k = np.array(weight_k) # cast hdf5 into numpy array
#
# # check time series has a target sequence length
# if values_weight_k[1001::,].shape[0] == self.target_sequence_length:
# print("{}-th weight configuration".format(i), weight_configurations[i])
# print("current {}-th weight configuration:".format(i), values_weight_k.shape)
# print("saving {}-th weight configuration".format(i))
# print("target sequence length:", self.target_sequence_length)
# print("time series shape:", values_weight_k.shape)
# print()
#
# # append first 10,000 data points for each simulation
# sensor_columns_data.append(values_weight_k[0:self.min_sequence_length,])
# print("sensor columns data shape:", np.shape(sensor_columns_data))
# print()
#
# if weight_configurations[i] in abnormal_configs:
# abnormal_configs.remove(weight_configurations[i])
# print("current abnormal configs:", abnormal_configs)
# print()
#
# # if array is empty it means no simulations with target sequence length
# if not sensor_columns_data:
# # TODO: This workaround is not working as expected
# print("Sensor columns data list empty: NO simulation with target sequence length")
# print("adding random data as a REPLACEMENT")
# print()
# sensor_columns_data.append(np.random.rand(11*self.min_sequence_length, 27))
#
# # else stack data into general sensor columns data
# sensor_columns_data = np.vstack(sensor_columns_data)
# print("sensor columns stacked data:", sensor_columns_data.shape)
# print()
#
# elapsed_time_fl = (time.time() - start)
# print("[INFO] Ellapsed time to stack sensor readings:", elapsed_time_fl)
# print("-----------------")
#
# # save data into npy format
# if save_npy:
# if not os.path.exists(os.path.join(self.data_folder_path, "npy_data")):
# os.makedirs(os.path.join(self.data_folder_path, "npy_data"))
#
# np.save(os.path.join(self.data_folder_path,
# "npy_data", npy_file_name),
# sensor_columns_data)
#
# print("Saved sensor data into npy format...")
#
# if downsample_factor:
# return sensor_columns_data, weight_configurations
# else:
# return sensor_columns_data, _
class MultiClassH5DataSet:
"""
Represents an abstract HDF5 dataset.
This class allows the user to read headers and sensor data from hdf5 files
into numpy arrays.
Input params:
"""
    def __init__(self, data_folder_path, file_path,
                 min_sequence_length, target_sequence_length):
        """Store dataset paths and sequence-length settings.

        Args:
            data_folder_path: folder holding the data; must exist.
            file_path: path of the hdf5 file to read.
            min_sequence_length: number of leading time steps kept per simulation.
            target_sequence_length: required series length for inclusion.
        """
        self.data_folder_path = data_folder_path
        self.file_path = file_path
        self.min_sequence_length = min_sequence_length
        self.target_sequence_length = target_sequence_length
        # fail fast when the supplied data folder does not exist
        assert(Path(self.data_folder_path).is_dir())
def get_headers(self):
with h5py.File(self.file_path, "r") as f:
headers = list(f.keys())
return headers
def get_weight_configs(self):
with h5py.File(self.file_path, "r") as f:
data = f.get("data")
weight_configurations = list(data.keys())
return weight_configurations
def get_sensor_readings(self, downsample_factor):
"""
This method stacks the 27 sensor readings at each weight configuration.
It gets only those time series with at least "length_time_series" no. of points.
Returns normal and abnormal cases as numpy arrays.
"""
with h5py.File(self.file_path, "r") as f:
data = f.get("data")
weight_configurations = list(data.keys())
# stack sensor readings based on weight zone (return all arrays separately for easier processing)
normal_sensor_data = []
abnormal_zone3_sensor_data = []
abnormal_zone2_sensor_data = []
abnormal_zone1_sensor_data = []
sampled_weight_configurations = []
# define normal and abnormal configurations
normal_configs = ["_0.0-0.0-0.0"]
# take abnormal cases from all 3 zones
abnormal_configs = ["_0.0-0.0-0.2", '_0.0-0.0-0.4', "_0.0-0.0-0.6", '_0.0-0.0-0.8', # zone 3
"_0.0-0.0-1.0", "_0.0-0.0-1.2", "_0.0-0.0-1.4", "_0.0-0.0-1.6",
"_0.0-0.0-1.8", '_0.0-0.0-2.0', "_0.0-0.0-2.2", '_0.0-0.0-2.4',
"_0.0-0.0-2.6", '_0.0-0.0-2.8', "_0.0-0.0-3.0", '_0.0-0.0-3.2',
"_0.0-0.0-3.4", '_0.0-0.0-3.6', "_0.0-0.0-3.8", '_0.0-0.0-4.0',
"_0.0-0.2-0.0", '_0.0-0.4-0.0', "_0.0-0.6-0.0", '_0.0-0.8-0.0', # zone 2
"_0.0-1.0-0.0", '_0.0-1.2-0.0', "_0.0-1.4-0.0", '_0.0-1.6-0.0',
"_0.0-1.8-0.0", '_0.0-2.0-0.0', "_0.0-2.2-0.0", '_0.0-2.4-0.0',
"_0.0-2.6-0.0", '_0.0-2.8-0.0', "_0.0-3.0-0.0", '_0.0-3.2-0.0',
"_0.0-3.4-0.0", '_0.0-3.6-0.0', "_0.0-3.8-0.0", '_0.0-4.0-0.0',
"_0.2-0.0-0.0", '_0.4-0.0-0.0', "_0.6-0.0-0.0", "_0.8-0.0-0.0", # zone 1
'_1.0-0.0-0.0', '_1.2-0.0-0.0', "_1.4-0.0-0.0", '_1.6-0.0-0.0',
"_1.8-0.0-0.0", '_2.0-0.0-0.0', "_2.2-0.0-0.0", '_2.4-0.0-0.0',
"_2.6-0.0-0.0", '_2.8-0.0-0.0', "_3.0-0.0-0.0", '_3.2-0.0-0.0',
"_3.4-0.0-0.0", '_3.6-0.0-0.0', "_3.8-0.0-0.0", '_4.0-0.0-0.0']
abnormal_configs_3 = ["_0.0-0.0-0.2", '_0.0-0.0-0.4', "_0.0-0.0-0.6", '_0.0-0.0-0.8',
"_0.0-0.0-1.0", "_0.0-0.0-1.2", "_0.0-0.0-1.4", "_0.0-0.0-1.6",
"_0.0-0.0-1.8", '_0.0-0.0-2.0', "_0.0-0.0-2.2", '_0.0-0.0-2.4',
"_0.0-0.0-2.6", '_0.0-0.0-2.8', "_0.0-0.0-3.0", '_0.0-0.0-3.2',
"_0.0-0.0-3.4", '_0.0-0.0-3.6', "_0.0-0.0-3.8", '_0.0-0.0-4.0']
abnormal_configs_2 = ["_0.0-0.2-0.0", '_0.0-0.4-0.0', "_0.0-0.6-0.0", '_0.0-0.8-0.0',
"_0.0-1.0-0.0", '_0.0-1.2-0.0', "_0.0-1.4-0.0", '_0.0-1.6-0.0',
"_0.0-1.8-0.0", '_0.0-2.0-0.0', "_0.0-2.2-0.0", '_0.0-2.4-0.0',
"_0.0-2.6-0.0", '_0.0-2.8-0.0', "_0.0-3.0-0.0", '_0.0-3.2-0.0',
"_0.0-3.4-0.0", '_0.0-3.6-0.0', "_0.0-3.8-0.0", '_0.0-4.0-0.0']
abnormal_configs_1 = ["_0.2-0.0-0.0", '_0.4-0.0-0.0', "_0.6-0.0-0.0", "_0.8-0.0-0.0",
'_1.0-0.0-0.0', '_1.2-0.0-0.0', "_1.4-0.0-0.0", '_1.6-0.0-0.0',
"_1.8-0.0-0.0", '_2.0-0.0-0.0', "_2.2-0.0-0.0", '_2.4-0.0-0.0',
"_2.6-0.0-0.0", '_2.8-0.0-0.0', "_3.0-0.0-0.0", '_3.2-0.0-0.0',
"_3.4-0.0-0.0", '_3.6-0.0-0.0', "_3.8-0.0-0.0", '_4.0-0.0-0.0']
start = time.time()
print()
print("[INFO] Started reading hdf5 file: ", self.file_path)
for i in range(0, len(weight_configurations)):
weight_k = data.get(weight_configurations[i])
values_weight_k = np.array(weight_k) # cast hdf5 into numpy array
if i % | |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from bpy.types import Panel
from bl_ui.space_view3d import (
VIEW3D_PT_shading_lighting,
VIEW3D_PT_shading_color,
VIEW3D_PT_shading_options,
)
from bl_ui.properties_grease_pencil_common import GreasePencilSimplifyPanel
class RenderButtonsPanel:
    """Mixin for panels shown in the Render tab of the Properties editor."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "render"
    # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here

    @classmethod
    def poll(cls, context):
        # show the panel only for engines registered in COMPAT_ENGINES
        return context.engine in cls.COMPAT_ENGINES
class RENDER_PT_context(Panel):
    """Header-less panel exposing the render-engine selector."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "render"
    bl_options = {'HIDE_HEADER'}
    bl_label = ""

    @classmethod
    def poll(cls, context):
        return context.scene

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        render = context.scene.render
        # only offer a choice when more than one engine is available
        if render.has_multiple_engines:
            layout.prop(render, "engine", text="Render Engine")
class RENDER_PT_color_management(RenderButtonsPanel, Panel):
    """Scene color-management settings (display device, view transform, exposure)."""
    bl_label = "Color Management"
    bl_options = {'DEFAULT_CLOSED'}
    bl_order = 100
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        scene = context.scene
        view = scene.view_settings
        flow = layout.grid_flow(row_major=True, columns=0, even_columns=False,
                                even_rows=False, align=True)

        left = flow.column()
        left.prop(scene.display_settings, "display_device")
        left.separator()
        left.prop(view, "view_transform")
        left.prop(view, "look")

        right = flow.column()
        right.prop(view, "exposure")
        right.prop(view, "gamma")
        right.separator()
        right.prop(scene.sequencer_colorspace_settings, "name", text="Sequencer")
class RENDER_PT_color_management_curves(RenderButtonsPanel, Panel):
    """Sub-panel toggling and editing the color-management curve mapping."""
    bl_label = "Use Curves"
    bl_parent_id = "RENDER_PT_color_management"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw_header(self, context):
        view = context.scene.view_settings
        self.layout.prop(view, "use_curve_mapping", text="")

    def draw(self, context):
        view = context.scene.view_settings

        layout = self.layout
        layout.use_property_split = False
        layout.use_property_decorate = False  # No animation.
        # grey out the curve widget while curve mapping is disabled
        layout.enabled = view.use_curve_mapping

        layout.template_curve_mapping(view, "curve_mapping", type='COLOR', levels=True)
class RENDER_PT_eevee_ambient_occlusion(RenderButtonsPanel, Panel):
    """Eevee ambient-occlusion (GTAO) settings."""
    bl_label = "Ambient Occlusion"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw_header(self, context):
        props = context.scene.eevee
        self.layout.prop(props, "use_gtao", text="")

    def draw(self, context):
        props = context.scene.eevee

        layout = self.layout
        layout.use_property_split = True
        layout.active = props.use_gtao

        col = layout.column()
        for setting in ("gtao_distance", "gtao_factor", "gtao_quality",
                        "use_gtao_bent_normals", "use_gtao_bounce"):
            col.prop(props, setting)
class RENDER_PT_eevee_motion_blur(RenderButtonsPanel, Panel):
    """Eevee motion-blur settings."""
    bl_label = "Motion Blur"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw_header(self, context):
        props = context.scene.eevee
        self.layout.prop(props, "use_motion_blur", text="")

    def draw(self, context):
        props = context.scene.eevee

        layout = self.layout
        layout.use_property_split = True
        layout.active = props.use_motion_blur

        col = layout.column()
        col.prop(props, "motion_blur_samples")
        col.prop(props, "motion_blur_shutter")
class RENDER_PT_eevee_depth_of_field(RenderButtonsPanel, Panel):
    """Eevee depth-of-field settings."""
    bl_label = "Depth of Field"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        props = context.scene.eevee

        layout = self.layout
        layout.use_property_split = True

        col = layout.column()
        col.prop(props, "bokeh_max_size")
        # Not supported yet
        # col.prop(props, "bokeh_threshold")
class RENDER_PT_eevee_bloom(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE bloom settings."""

    bl_label = "Bloom"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw_header(self, context):
        # Header checkbox toggles the whole effect.
        eevee = context.scene.eevee
        self.layout.prop(eevee, "use_bloom", text="")

    def draw(self, context):
        eevee = context.scene.eevee
        layout = self.layout
        layout.use_property_split = True
        layout.active = eevee.use_bloom

        col = layout.column()
        for prop_name in ("bloom_threshold", "bloom_knee", "bloom_radius",
                          "bloom_color", "bloom_intensity", "bloom_clamp"):
            col.prop(eevee, prop_name)
class RENDER_PT_eevee_volumetric(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE volumetrics settings."""

    bl_label = "Volumetrics"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        eevee = context.scene.eevee

        # Start/end distances are drawn as one aligned pair.
        aligned = layout.column(align=True)
        aligned.prop(eevee, "volumetric_start")
        aligned.prop(eevee, "volumetric_end")

        col = layout.column()
        col.prop(eevee, "volumetric_tile_size")
        col.prop(eevee, "volumetric_samples")
        col.prop(eevee, "volumetric_sample_distribution", text="Distribution")
class RENDER_PT_eevee_volumetric_lighting(RenderButtonsPanel, Panel):
    """Sub-panel of the volumetrics panel: volumetric lighting options."""

    bl_label = "Volumetric Lighting"
    bl_parent_id = "RENDER_PT_eevee_volumetric"
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    def draw_header(self, context):
        # Header checkbox toggles volumetric lights.
        eevee = context.scene.eevee
        self.layout.prop(eevee, "use_volumetric_lights", text="")

    def draw(self, context):
        eevee = context.scene.eevee
        layout = self.layout
        layout.use_property_split = True
        layout.active = eevee.use_volumetric_lights
        layout.prop(eevee, "volumetric_light_clamp", text="Light Clamping")
class RENDER_PT_eevee_volumetric_shadows(RenderButtonsPanel, Panel):
    """Sub-panel of the volumetrics panel: volumetric shadow options."""

    bl_label = "Volumetric Shadows"
    bl_parent_id = "RENDER_PT_eevee_volumetric"
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    def draw_header(self, context):
        # Header checkbox toggles volumetric shadows.
        eevee = context.scene.eevee
        self.layout.prop(eevee, "use_volumetric_shadows", text="")

    def draw(self, context):
        eevee = context.scene.eevee
        layout = self.layout
        layout.use_property_split = True
        layout.active = eevee.use_volumetric_shadows
        layout.prop(eevee, "volumetric_shadow_samples", text="Shadow Samples")
class RENDER_PT_eevee_subsurface_scattering(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE subsurface-scattering settings."""

    bl_label = "Subsurface Scattering"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True

        eevee = context.scene.eevee
        col = layout.column()
        col.prop(eevee, "sss_samples")
        col.prop(eevee, "sss_jitter_threshold")
class RENDER_PT_eevee_screen_space_reflections(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE screen-space reflection settings."""

    bl_label = "Screen Space Reflections"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw_header(self, context):
        # Header checkbox toggles screen-space reflections.
        eevee = context.scene.eevee
        self.layout.prop(eevee, "use_ssr", text="")

    def draw(self, context):
        eevee = context.scene.eevee
        layout = self.layout
        layout.use_property_split = True

        # Only the column is dimmed (not the whole layout) while disabled.
        col = layout.column()
        col.active = eevee.use_ssr
        col.prop(eevee, "use_ssr_refraction", text="Refraction")
        col.prop(eevee, "use_ssr_halfres")
        col.prop(eevee, "ssr_quality")
        col.prop(eevee, "ssr_max_roughness")
        col.prop(eevee, "ssr_thickness")
        col.prop(eevee, "ssr_border_fade")
        col.prop(eevee, "ssr_firefly_fac")
class RENDER_PT_eevee_shadows(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE shadow settings."""

    bl_label = "Shadows"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True

        eevee = context.scene.eevee
        col = layout.column()
        col.prop(eevee, "shadow_cube_size", text="Cube Size")
        col.prop(eevee, "shadow_cascade_size", text="Cascade Size")
        col.prop(eevee, "use_shadow_high_bitdepth")
        col.prop(eevee, "use_soft_shadows")
        col.prop(eevee, "light_threshold")
class RENDER_PT_eevee_sampling(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE TAA sampling settings."""

    bl_label = "Sampling"
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        eevee = context.scene.eevee

        # Sample counts are drawn as one aligned pair.
        samples = layout.column(align=True)
        samples.prop(eevee, "taa_render_samples", text="Render")
        samples.prop(eevee, "taa_samples", text="Viewport")

        col = layout.column()
        col.prop(eevee, "use_taa_reprojection")
class RENDER_PT_eevee_indirect_lighting(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE indirect-lighting (light cache) baking."""

    bl_label = "Indirect Lighting"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        # Show the panel only for engines it supports.
        return (context.engine in cls.COMPAT_ENGINES)

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        scene = context.scene
        props = scene.eevee

        col = layout.column()
        col.operator("scene.light_cache_bake", text="Bake Indirect Lighting", icon='RENDER_STILL')
        col.operator("scene.light_cache_bake", text="Bake Cubemap Only", icon='LIGHTPROBE_CUBEMAP').subset = 'CUBEMAPS'
        col.operator("scene.light_cache_free", text="Delete Lighting Cache")

        # Consistency fix: read the cache info through the existing ``props``
        # alias instead of a second ``scene.eevee`` lookup.
        cache_info = props.gi_cache_info
        if cache_info:
            col.label(text=cache_info)

        col.prop(props, "gi_auto_bake")
        col.prop(props, "gi_diffuse_bounces")
        col.prop(props, "gi_cubemap_resolution")
        col.prop(props, "gi_visibility_resolution", text="Diffuse Occlusion")
        col.prop(props, "gi_irradiance_smoothing")
        col.prop(props, "gi_glossy_clamp")
        col.prop(props, "gi_filter_quality")
class RENDER_PT_eevee_indirect_lighting_display(RenderButtonsPanel, Panel):
    """Sub-panel of indirect lighting: light-cache debug display options."""

    bl_label = "Display"
    bl_parent_id = "RENDER_PT_eevee_indirect_lighting"
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        eevee = context.scene.eevee

        # Each probe type gets a size field plus a show/hide toggle.
        cubemap_row = layout.row(align=True)
        cubemap_row.prop(eevee, "gi_cubemap_display_size", text="Cubemap Size")
        cubemap_row.prop(eevee, "gi_show_cubemaps", text="", toggle=True)

        irradiance_row = layout.row(align=True)
        irradiance_row.prop(eevee, "gi_irradiance_display_size", text="Irradiance Size")
        irradiance_row.prop(eevee, "gi_show_irradiance", text="", toggle=True)
class RENDER_PT_eevee_film(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE film settings."""

    bl_label = "Film"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True

        render = context.scene.render
        col = layout.column()
        col.prop(render, "filter_size")
        col.prop(render, "film_transparent", text="Transparent")
class RENDER_PT_eevee_film_overscan(RenderButtonsPanel, Panel):
    """Sub-panel of the film panel: render overscan options."""

    bl_label = "Overscan"
    bl_parent_id = "RENDER_PT_eevee_film"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    def draw_header(self, context):
        # Header checkbox toggles overscan.
        eevee = context.scene.eevee
        self.layout.prop(eevee, "use_overscan", text="")

    def draw(self, context):
        eevee = context.scene.eevee
        layout = self.layout
        layout.use_property_split = True
        layout.active = eevee.use_overscan
        layout.prop(eevee, "overscan_size", text="Size")
class RENDER_PT_eevee_hair(RenderButtonsPanel, Panel):
    """Render panel exposing EEVEE hair rendering settings."""

    bl_label = "Hair"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        render = context.scene.render
        layout = self.layout
        layout.use_property_split = True
        layout.prop(render, "hair_type", expand=True)
        layout.prop(render, "hair_subdiv")
class RENDER_PT_eevee_performance(RenderButtonsPanel, Panel):
    """Render panel exposing performance options (EEVEE and Workbench)."""

    bl_label = "Performance"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        render = context.scene.render
        layout = self.layout
        layout.use_property_split = True
        layout.prop(render, "use_high_quality_normals")
class RENDER_PT_opengl_sampling(RenderButtonsPanel, Panel):
bl_label = "Sampling"
COMPAT_ENGINES = {'BLENDER_WORKBENCH'}
@classmethod
def poll(cls, context):
return (context.engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
props = scene.display
col = layout.column()
col.prop(props, "render_aa", | |
<gh_stars>0
import torch
import torch.nn.functional as F
import math
import numpy as np
from numpy.random import randn
from models.utils.kronecker import sylvester
from models.utils.cholesky import per_batch_cholesky
from models.utils.plotting import plot_confidence
from pytorch_lightning import seed_everything, LightningModule, Trainer
from pytorch_lightning.callbacks import EarlyStopping
from torch.utils.data import TensorDataset, DataLoader, random_split
# from tqdm import tqdm
import pykalman
from pykalman.standard import KalmanFilter, _smooth, _filter # , _loglikelihoods
from pykalman.utils import preprocess_arguments, get_params, array1d, array2d, check_random_state
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import functools
import copy
# from ignite.engine import Engine, Events
# from ignite.handlers import EarlyStopping
# from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
# from ignite.metrics import Average
# from ignite.contrib.handlers.tensorboard_logger import *
# from ignite.contrib.engines.common import setup_tb_logging
from models.utils.dataset_torch_preparator import DatasetContainer
class SmoothKalman(KalmanFilter):
def __init__(self,
             length=1,
             num_states=1,
             num_outputs=1,
             cont_precision=4,
             kwd_params=None):
    """Kalman filter/smoother with torch-initialised parameters.

    Args:
        length: number of time steps of the time-varying model.
        num_states: latent state dimension.
        num_outputs: observation dimension.
        cont_precision: continuity precision ``kappa``; a negative value
            selects the time-invariant (TI) variant.
        kwd_params: optional dict overriding ``kappa`` and/or any of the
            pykalman constructor arguments built below.
    """
    # Bug fix: the default used to be the mutable ``dict()``; a ``None``
    # sentinel is backward compatible and avoids the shared-default pitfall.
    kwd_params = {} if kwd_params is None else kwd_params
    self.kappa = kwd_params.get('kappa', cont_precision)
    if self.kappa < 0:  # time-invariant (TI) Kalman variant
        a_tensor = (1/math.sqrt(num_states))*torch.randn([num_states, num_states], dtype=torch.double).detach().numpy()
        q_tensor = torch.eye(num_states, dtype=torch.double).detach().numpy()
        transition_offsets = torch.zeros([num_states], dtype=torch.double).detach().numpy()
        observation_offsets = torch.zeros([num_outputs], dtype=torch.double).detach().numpy()
    else:  # time-varying (TV) Kalman: one transition per step
        a_tensor = (1/math.sqrt(num_states))*torch.randn([length-1, num_states, num_states], dtype=torch.double).detach().numpy()
        q_tensor = torch.eye(num_states, dtype=torch.double).repeat(length-1, 1, 1).detach().numpy()
        transition_offsets = torch.zeros([length-1, num_states], dtype=torch.double).detach().numpy()
        observation_offsets = torch.zeros([length, num_outputs], dtype=torch.double).detach().numpy()
    mat_b = (1/math.sqrt(num_outputs*num_states))*torch.randn([num_outputs, num_states], dtype=torch.double).detach().numpy()
    mat_r = torch.eye(num_outputs, dtype=torch.double).detach().numpy()
    mat_mu0 = torch.randn(torch.Size([num_states]), dtype=torch.double).detach().numpy()
    mat_p0 = torch.eye(num_states, dtype=torch.double).detach().numpy()
    default_kalman_params = {
        'transition_matrices': a_tensor,
        'observation_matrices': mat_b,
        'transition_covariance': q_tensor,
        'observation_covariance': mat_r,
        'initial_state_mean': mat_mu0,
        'initial_state_covariance': mat_p0,
        'transition_offsets': transition_offsets,
        'observation_offsets': observation_offsets,
        'em_vars': 'all'
    }
    # Only keys pykalman actually understands may override the defaults
    # (this filters out e.g. the 'kappa' entry).
    subset_params = {
        key: kwd_params[key]
        for key in kwd_params if key in default_kalman_params
    }
    merged_kalman_params = {**default_kalman_params, **subset_params}
    super(SmoothKalman, self).__init__(**merged_kalman_params)
@property
def length(self):
    """Model length: 1 for the TI model (2-D transition matrix), else the
    number of transition matrices plus one."""
    shape = self.transition_matrices.shape
    return 1 if len(shape) <= 2 else shape[0] + 1
def state_dict(self):
    """Return the model parameters as a plain dict (torch-style API)."""
    parameter_names = (
        'transition_matrices',
        'transition_covariance',
        'observation_matrices',
        'observation_covariance',
        'initial_state_mean',
        'initial_state_covariance',
        'transition_offsets',
        'observation_offsets',
    )
    state = {'kappa': self.kappa}
    state.update((name, getattr(self, name)) for name in parameter_names)
    return state
def load_state_dict(self, dictionary, **kwargs):
    """Restore parameters previously produced by :meth:`state_dict`."""
    self.kappa = dictionary['kappa']
    self.transition_offsets = dictionary['transition_offsets']
    self.observation_offsets = dictionary['observation_offsets']
    # The remaining parameters go through the numpy update helper, in the
    # positional order it expects.
    matrix_args = [dictionary[name] for name in (
        'transition_matrices',
        'transition_covariance',
        'observation_matrices',
        'observation_covariance',
        'initial_state_mean',
        'initial_state_covariance',
    )]
    self.update_parameters_from_np(*matrix_args)
def trim_length(self, y: torch.Tensor) -> torch.Tensor:
    """Clip a batched observation tensor to the model's length.

    Args:
        y: 4-D tensor shaped ``(batch, time, outputs, 1)``.

    Returns:
        ``y`` unchanged for the TI model (length 1) or when it already
        fits; otherwise ``y`` shortened along the time axis.
    """
    # ``isinstance`` (instead of ``type(...) is``) also accepts Tensor
    # subclasses such as ``nn.Parameter``.
    assert isinstance(y, torch.Tensor), 'trim_length: Expected tensor input.'
    assert y.dim() == 4, 'trim_length: Expected 4-dimensional input.'
    if self.length == 1:  # Time-Invariant version doesn't trim
        return y
    if y.size(1) > self.length:
        return y[:, 0:self.length]
    # NOTE(review): inputs shorter than the model are returned as-is; a
    # replicate-padding variant was sketched here before and removed.
    return y
def apply_torch(self, func, y):
    """Run ``func`` on every batch element of ``y`` and tensorise results.

    Args:
        func: callable taking one ``(time, outputs)`` numpy array and
            returning an iterable of arrays.
        y: 4-D ``torch.Tensor`` shaped ``(batch, time, outputs, 1)``.

    Returns:
        List (one entry per batch element) of lists of double tensors.
    """
    # Bug fix: the assertion messages were copy-pasted from other methods
    # ('trim_length:' / 'smooth_torch:') and misreported the failure site.
    assert type(y) is torch.Tensor, 'apply_torch: Expected tensor input.'
    assert y.dim() == 4, 'apply_torch: Expected 4-dimensional input.'
    y = self.trim_length(y)
    y = y.squeeze(-1)
    y_np = y.detach().numpy()
    result_list = []
    for y_i in y_np:
        result = func(y_i)
        result_cast = [torch.tensor(item, dtype=torch.double) for item in result]
        result_list.append(result_cast)
    return result_list
def filter(self, y):
    """Run the forward Kalman filter on a single observation sequence.

    Returns the pykalman ``_filter`` tuple: ``(predicted_state_means,
    predicted_state_covariances, kalman_gains, filtered_state_means,
    filtered_state_covariances)``.
    """
    observations = self._parse_observations(y)
    return _filter(
        self.transition_matrices,
        self.observation_matrices,
        self.transition_covariance,
        self.observation_covariance,
        self.transition_offsets,
        self.observation_offsets,
        self.initial_state_mean,
        self.initial_state_covariance,
        observations,
    )
def smooth_torch(self, y):
    """Calculate smoothed state estimates for a batched observation tensor.

    Args:
        y: 4-D ``torch.Tensor`` shaped ``(batch, time, outputs, 1)``.

    Returns:
        Tuple ``(means, covariances, smoothing_gains)``; ``means`` is
        ``(batch, time, states, 1)``.
    """
    # Bug fix: assertion messages used to misreport the failure site.
    assert type(y) is torch.Tensor, 'smooth_torch: Expected tensor input.'
    assert y.dim() == 4, 'smooth_torch: Expected 4-dimensional input.'
    mu_list = []
    cov_list = []
    kal_gains_list = []
    y = self.trim_length(y)
    y = y.squeeze(-1)
    y_np = y.detach().numpy()
    for y_i in y_np:
        (smoothed_state_means, smoothed_state_covariances,
         kalman_smoothing_gains) = self.smooth(y_i)
        mu_list.append(torch.from_numpy(smoothed_state_means))
        cov_list.append(torch.from_numpy(smoothed_state_covariances))
        kal_gains_list.append(torch.from_numpy(kalman_smoothing_gains))
    mu_tensor = torch.stack(mu_list, dim=0).unsqueeze(-1)
    # NOTE(review): only the first batch element's covariances/gains are
    # returned — presumably identical across the batch because the Kalman
    # covariance recursion does not depend on the observed values; confirm.
    # Reusing the tensors directly also avoids the discouraged
    # ``torch.tensor(existing_tensor)`` copy (same values, no UserWarning).
    cov_tensor = cov_list[0]
    kal_gains_tensor = kal_gains_list[0]
    return mu_tensor, cov_tensor, kal_gains_tensor
def smooth(self, X):
    """Apply the Kalman smoother to one observation sequence.

    Estimates the hidden state at every time ``t`` in
    ``[0, n_timesteps - 1]`` given all observations; see pykalman's
    ``_smooth`` for the detailed output description.

    Parameters
    ----------
    X : [n_timesteps, n_dim_obs] array-like
        Observations for times ``[0...n_timesteps-1]``. If ``X`` is a
        masked array and ``X[t]`` is masked, that observation is treated
        as missing.

    Returns
    -------
    smoothed_state_means : [n_timesteps, n_dim_state]
        Mean of the hidden-state distribution at each time given all
        observations.
    smoothed_state_covariances : [n_timesteps, n_dim_state]
        Covariance of the hidden-state distribution at each time given
        all observations.
    kalman_smoothing_gains : [n_timesteps-1, n_dim_state, n_dim_state] array
        Smoothing correction matrices for times ``[0...n_timesteps-2]``.
    """
    observations = self._parse_observations(X)
    # Forward pass: filter.
    (predicted_means, predicted_covs, _unused_gains,
     filtered_means, filtered_covs) = _filter(
        self.transition_matrices, self.observation_matrices,
        self.transition_covariance, self.observation_covariance,
        self.transition_offsets, self.observation_offsets,
        self.initial_state_mean, self.initial_state_covariance,
        observations)
    # Backward pass: smooth.
    return _smooth(self.transition_matrices, filtered_means, filtered_covs,
                   predicted_means, predicted_covs)
def predict_output_torch(self,
                         y,
                         predicted_state_means=None,
                         predicted_state_covariances=None):
    """One-step-ahead output prediction for a batched observation tensor.

    Args:
        y: 4-D ``torch.Tensor`` shaped ``(batch, time, outputs, 1)``.
        predicted_state_means: optional precomputed predicted state means
            as a ``(batch, time, states, 1)`` tensor; recomputed from
            ``y`` when omitted.
        predicted_state_covariances: optional precomputed predicted state
            covariances as a ``(time, states, states)`` tensor.

    Returns:
        Tuple ``(y_hat, sig)`` — predicted outputs and their covariance.
    """
    # Bug fix: assertion messages used to misreport the failure site.
    assert type(y) is torch.Tensor, 'predict_output_torch: Expected tensor input.'
    assert y.dim() == 4, 'predict_output_torch: Expected 4-dimensional input.'
    y = self.trim_length(y)
    if predicted_state_means is None or predicted_state_covariances is None:
        # Bug fix: the precomputed arguments were previously accepted but
        # never used — the filter results unconditionally replaced them.
        filter_list = self.apply_torch(self.filter, y)
        predicted_state_means = torch.stack(
            [item[0] for item in filter_list], dim=0).unsqueeze(-1)
        # NOTE(review): only the first batch element's covariances are used
        # (matches smooth_torch) — presumably batch-independent; confirm.
        predicted_state_covariances = filter_list[0][1]
    observation_matrices_tensor = torch.tensor(self.observation_matrices, dtype=torch.double)
    observation_covariance_tensor = torch.tensor(self.observation_covariance, dtype=torch.double)
    y_hat = observation_matrices_tensor @ predicted_state_means
    sig = (observation_matrices_tensor @ predicted_state_covariances
           @ observation_matrices_tensor.T + observation_covariance_tensor)
    return y_hat, sig
def log_pred_density(self,
                     y,
                     predicted_state_means=None,
                     predicted_state_covariances=None):
    '''Log predictive density of each batch element.

    Args are `torch.Tensor` of size `b x N x n_outs x 1` - that is
    4-dimensional.

    Returns:
        Stacked per-batch-element log-probability tensors.
    '''
    assert type(y) is torch.Tensor, 'Expected tensor input.'
    assert y.dim() == 4, 'Expected 4-dimensional input.'
    y = self.trim_length(y)
    y_hat, sig = self.predict_output_torch(
        y, predicted_state_means, predicted_state_covariances)
    sig_chol = per_batch_cholesky(sig)
    # Residuals, with the trailing singleton axis dropped: (b, N, outs).
    nu = (y - y_hat).squeeze(-1)
    log_lik_list = []
    # Perf fix: iterate the batch axis directly instead of spinning up a
    # DataLoader that only re-batched one residual at a time.
    for nu_i in nu:
        mvn = torch.distributions.multivariate_normal.MultivariateNormal(
            torch.zeros(nu_i.size()),
            scale_tril=sig_chol
        )
        log_lik_list.append(mvn.log_prob(nu_i))
    return torch.stack(log_lik_list)
def loglikelihood_torch(self,
                        y,
                        predicted_state_means=None,
                        predicted_state_covariances=None):
    """
    Calculates estimate of ELPD in the form of the mean log pointwise
    predictive density.
    """
    # Bug fix: the assertion messages were copy-pasted from other methods
    # ('trim_length:' / 'smooth_torch:') and misreported the failure site.
    assert type(y) is torch.Tensor, 'loglikelihood_torch: Expected tensor input.'
    assert y.dim() == 4, 'loglikelihood_torch: Expected 4-dimensional input.'
    results = self.log_pred_density(y, predicted_state_means, predicted_state_covariances)
    return results.mean()
def _initialize_parameters(self, arguments=None):
    """Retrieve parameters if they exist, else replace with defaults.

    Args:
        arguments: optional mapping of explicit parameter values; when
            omitted, the current estimator attributes are used.
    """
    n_dim_state, n_dim_obs = self.n_dim_state, self.n_dim_obs
    # Bug fix: ``arguments`` used to be unconditionally overwritten, so an
    # explicitly supplied mapping was silently ignored.
    if arguments is None:
        arguments = get_params(super(SmoothKalman, self))
    defaults = {
        'transition_matrices': np.eye(n_dim_state),
        'transition_offsets': np.zeros(n_dim_state),
        'transition_covariance': np.eye(n_dim_state),
        'observation_matrices': np.eye(n_dim_obs, n_dim_state),
        'observation_offsets': np.zeros(n_dim_obs),
        'observation_covariance': np.eye(n_dim_obs),
        'initial_state_mean': np.zeros(n_dim_state),
        'initial_state_covariance': np.eye(n_dim_state),
        'random_state': 0,
        'em_vars': [
            'transition_covariance',
            'observation_covariance',
            'initial_state_mean',
            'initial_state_covariance'
        ],
    }
    converters = self._get_param_converters()
    parameters = preprocess_arguments([arguments, defaults], converters)
    return (
        parameters['transition_matrices'],
        parameters['transition_offsets'],
        parameters['transition_covariance'],
        parameters['observation_matrices'],
        parameters['observation_offsets'],
        parameters['observation_covariance'],
        parameters['initial_state_mean'],
        parameters['initial_state_covariance']
    )
def _get_param_converters(self):
    """Map every constructor parameter to its normalising converter."""
    return dict(
        transition_matrices=array2d,
        transition_offsets=array1d,
        transition_covariance=array2d,
        observation_matrices=array2d,
        observation_offsets=array1d,
        observation_covariance=array2d,
        initial_state_mean=array1d,
        initial_state_covariance=array2d,
        random_state=check_random_state,
        n_dim_state=int,
        n_dim_obs=int,
        em_vars=lambda x: x,  # passed through untouched
    )
def maximization(self, y, x, p, h, a=None):
"""y is expected to be 4-dimensional (b*Length*Outs*1),
x is 4-dims (b*Length*States*1),
p is 3-dims (Length*States*States) symmetric positive definite
(will try to handle pos. semi-definite),
h is 3-dims (Length-1*States*States),
a (optional) is 3-dims (Length-1*States*States).
"""
xtt_outer = x @ x.permute(0, 1, 3, 2)
c_xtxt = (p + xtt_outer).mean(dim=0)
c_xtxt = (c_xtxt + c_xtxt.permute(0, 2, 1))/2
c_xtxt_inv = c_xtxt.pinverse()
# c_xtxt_inv = (c_xtxt_inv + c_xtxt_inv)/2
c_xtxt_chol = per_batch_cholesky(c_xtxt).tril()
xttm1_outer = x[:, 1:, :, :] @ (x[:, :-1, :, :].permute(0, 1, 3, 2))
# size_ttm1 = xttm1_outer.size(1)
ptcommatm1 = p[1:, :, :] @ h.permute(0, 2, 1)
c_xtxtm1 = (ptcommatm1 + xttm1_outer).mean(dim=0)
# a_tfrom1 = c_xtxtm1 @ c_xtm1xtm1^-1
a_tfrom1 = a
if a is None:
# a_tfrom1 = (c_xtxtm1.permute(0, 2, 1).cholesky_solve(c_xtxt_chol[:-1])).permute(0, 2, 1) # t = 0 is skipped
a_tfrom1 = c_xtxtm1 @ c_xtxt_inv[:-1]
# q_tfrom1 = c_xtxt[1:] - (c_xtxtm1 @ c_xtxt_inv[:-1] @ (c_xtxtm1.permute(0, 2, 1)))
# q_tfrom1 = (q_tfrom1 + q_tfrom1.permute(0, 2, 1))/2
q_tfrom1 = c_xtxt[1:] - (c_xtxtm1 @ (c_xtxtm1.permute(0, 2, 1).cholesky_solve(c_xtxt_chol[:-1])))
if self.kappa < 0:
c_xtxtm1_mean = c_xtxtm1.mean(dim=0)
c_xtxt_mean = c_xtxt.mean(dim=0)
c_xtxt_mean_inv = c_xtxt_mean.pinverse()
a_tfrom1 = c_xtxtm1_mean @ c_xtxt_mean_inv
q_tfrom1 = c_xtxt_mean - (c_xtxtm1_mean @ (c_xtxtm1_mean.permute(1, 0).cholesky_solve(c_xtxt_mean.cholesky())))
# a_tfrom1 = a_tfrom1.unsqueeze(0)
# q_tfrom1 = q_tfrom1.unsqueeze(0)
# cxtxtm1atT = c_xtxtm1 @ a_tfrom1.permute(0, 2, 1)
# cxtxtm1atT_sym = (cxtxtm1atT + cxtxtm1atT.permute(0, 2, 1))
# q_tfrom1 = c_xtxt[1:] - cxtxtm1atT_sym + (a_tfrom1 @ a_tfrom1.permute(0, 2, 1).cholesky_solve(c_xtxt_chol[:-1]))
if self.kappa > 0:
a_t_list = []
a_t_cont = a_tfrom1[0]
a_syl = self.kappa * q_tfrom1[0]
b_syl = c_xtxt[0]
q_syl = c_xtxtm1[0] + self.kappa * q_tfrom1[0] @ | |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dynamic Learning Rate"""
import math
from mindspore._checkparam import Validator as validator
def piecewise_constant_lr(milestone, learning_rates):
    r"""
    Get piecewise constant learning rate.

    Calculate learning rate by given `milestone` and `learning_rates`. Let the value of `milestone` be
    :math:`(M_1, M_2, ..., M_N)` and the value of `learning_rates` be :math:`(x_1, x_2, ..., x_N)`. N is the length of
    `milestone`. Let the output learning rate be `y`.

    .. math::
        y[i] = x_t,\ for\ i \in [M_{t-1}, M_t)

    Args:
        milestone (Union[list[int], tuple[int]]): A list of milestone. This list is a monotone increasing list.
            Every element is a milestone step, and must be greater than 0.
        learning_rates (Union[list[float], tuple[float]]): A list of learning rates.

    Returns:
        list[float]. The size of list is :math:`M_N`.

    Examples:
        >>> milestone = [2, 5, 10]
        >>> learning_rates = [0.1, 0.05, 0.01]
        >>> output = piecewise_constant_lr(milestone, learning_rates)
        >>> print(output)
        [0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]
    """
    validator.check_value_type('milestone', milestone, (tuple, list))
    validator.check_value_type('learning_rates', learning_rates, (tuple, list))
    if len(milestone) != len(learning_rates):
        raise ValueError("For 'piecewise_constant_lr', "
                         "the size of 'milestone' must be same with the size of 'learning_rates', "
                         "but got 'milestone' size: {}, 'learning_rates' size: {}."
                         .format(len(milestone), len(learning_rates)))
    lr = []
    last_item = 0
    for i, item in enumerate(milestone):
        validator.check_positive_int(item, f'milestone[{i}]')
        validator.check_is_float(learning_rates[i], f'learning_rates[{i}]')
        # Bug fix: milestones are documented (and reported in the error
        # message) as strictly increasing, but the old ``<`` comparison
        # silently accepted duplicate milestones.
        if item <= last_item:
            raise ValueError(f"For 'piecewise_constant_lr', "
                             f"the value of milestone[{i}] must be greater than milestone[{i - 1}], "
                             f"but got milestone[{i}]: {milestone[i]}, "
                             f"milestone[{i - 1}]: {milestone[i - 1]}.")
        lr += [learning_rates[i]] * (item - last_item)
        last_item = item
    return lr
def _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair):
    """Validate the arguments shared by the decay-schedule helpers."""
    for name, value in (('total_step', total_step),
                        ('step_per_epoch', step_per_epoch),
                        ('decay_epoch', decay_epoch)):
        validator.check_positive_int(value, name)
    for name, value in (('learning_rate', learning_rate),
                        ('decay_rate', decay_rate)):
        validator.check_positive_float(value, name)
        validator.check_is_float(value, name)
    validator.check_value_type('is_stair', is_stair, [bool])
def exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair=False):
    r"""
    Calculates learning rate base on exponential decay function.

    For the i-th step the decayed rate is

    .. math::
        decayed\_learning\_rate[i] = learning\_rate * decay\_rate^{\frac{current\_epoch}{decay\_epoch}}

    where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`.

    Args:
        learning_rate (float): The initial value of learning rate.
        decay_rate (float): The decay rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps in per epoch.
        decay_epoch (int): A value used to calculate decayed learning rate.
        is_stair (bool): If true, the rate decays once every `decay_epoch` times. Default: False.

    Returns:
        list[float]. The size of list is `total_step`.

    Examples:
        >>> exponential_decay_lr(0.1, 0.9, 6, 2, 1)
        [0.1, 0.1, 0.09000000000000001, 0.09000000000000001, 0.08100000000000002, 0.08100000000000002]
    """
    _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
    lr = []
    for step in range(total_step):
        epoch = math.floor(step / step_per_epoch)
        # Staircase mode floors the exponent so the rate drops in steps.
        exponent = math.floor(epoch / decay_epoch) if is_stair else epoch / decay_epoch
        lr.append(learning_rate * decay_rate ** exponent)
    return lr
def natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair=False):
    r"""
    Calculates learning rate base on natural exponential decay function.

    For the i-th step the decayed rate is

    .. math::
        decayed\_learning\_rate[i] = learning\_rate * e^{-decay\_rate * current\_epoch}

    where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`.

    Args:
        learning_rate (float): The initial value of learning rate.
        decay_rate (float): The decay rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps in per epoch.
        decay_epoch (int): A value used to calculate decayed learning rate.
        is_stair (bool): If true, the rate decays once every `decay_epoch` times. Default: False.

    Returns:
        list[float]. The size of list is `total_step`.

    Examples:
        >>> natural_exp_decay_lr(0.1, 0.9, 6, 2, 2, True)
        [0.1, 0.1, 0.1, 0.1, 0.016529888822158657, 0.016529888822158657]
    """
    _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
    lr = []
    for step in range(total_step):
        epoch = math.floor(step / step_per_epoch)
        if is_stair:
            # Snap the epoch down to the latest multiple of decay_epoch.
            epoch = math.floor(epoch / decay_epoch) * decay_epoch
        lr.append(learning_rate * math.e ** (-decay_rate * epoch))
    return lr
def inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair=False):
    r"""
    Calculates learning rate base on inverse-time decay function.

    For the i-th step the decayed rate is

    .. math::
        decayed\_learning\_rate[i] = learning\_rate / (1 + decay\_rate * current\_epoch / decay\_epoch)

    where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`.

    Args:
        learning_rate (float): The initial value of learning rate.
        decay_rate (float): The decay rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps in per epoch.
        decay_epoch (int): A value used to calculate decayed learning rate.
        is_stair (bool): If true, the rate decays once every `decay_epoch` times. Default: False.

    Returns:
        list[float]. The size of list is `total_step`.

    Examples:
        >>> inverse_decay_lr(0.1, 0.5, 6, 1, 1, True)
        [0.1, 0.06666666666666667, 0.05, 0.04, 0.03333333333333333, 0.028571428571428574]
    """
    _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
    lr = []
    for step in range(total_step):
        epoch = math.floor(step / step_per_epoch)
        if is_stair:
            denominator = 1 + decay_rate * math.floor(epoch / decay_epoch)
        else:
            denominator = 1 + decay_rate * epoch / decay_epoch
        lr.append(learning_rate / denominator)
    return lr
def cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch):
    r"""
    Calculates learning rate base on cosine decay function.

    For the i-th step the decayed rate is

    .. math::
        decayed\_learning\_rate[i] = min\_learning\_rate + 0.5 * (max\_learning\_rate - min\_learning\_rate) *
        (1 + cos(\frac{current\_epoch}{decay\_epoch}\pi))

    where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`.

    Args:
        min_lr (float): The minimum value of learning rate.
        max_lr (float): The maximum value of learning rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps in per epoch.
        decay_epoch (int): A value used to calculate decayed learning rate.

    Returns:
        list[float]. The size of list is `total_step`.

    Examples:
        >>> cosine_decay_lr(0.01, 0.1, 6, 2, 2)
        [0.1, 0.1, 0.05500000000000001, 0.05500000000000001, 0.01, 0.01]
    """
    if not isinstance(min_lr, float):
        raise TypeError("For 'cosine_decay_lr', the argument 'min_lr' must be type of float, "
                        "but got 'min_lr' type: {}.".format(type(min_lr)))
    validator.check_non_negative_float(min_lr, "min_lr", None)
    validator.check_positive_float(max_lr, 'max_lr')
    validator.check_is_float(max_lr, 'max_lr')
    validator.check_positive_int(total_step, 'total_step')
    validator.check_positive_int(step_per_epoch, 'step_per_epoch')
    validator.check_positive_int(decay_epoch, 'decay_epoch')
    if min_lr >= max_lr:
        raise ValueError("For 'cosine_decay_lr', the 'max_lr' should be greater than the 'min_lr', "
                         "but got 'max_lr' value: {}, 'min_lr' value: {}.".format(max_lr, min_lr))

    half_span = 0.5 * (max_lr - min_lr)

    def _rate_at(step):
        # The epoch is clamped at decay_epoch so the schedule flattens out.
        tmp_epoch = min(math.floor(step / step_per_epoch), decay_epoch)
        return min_lr + half_span * (1 + math.cos(math.pi * tmp_epoch / decay_epoch))

    return [_rate_at(i) for i in range(total_step)]
def polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power,
update_decay_epoch=False):
r"""
Calculates learning rate base on polynomial decay function.
For the i-th step, the formula of computing decayed_learning_rate[i] is:
.. math::
decayed\_learning\_rate[i] = (learning\_rate - end\_learning\_rate) *
(1 - tmp\_epoch / tmp\_decay\_epoch)^{power} + end\_learning\_rate
Where:
.. math::
tmp\_epoch = min(current\_epoch, decay\_epoch)
.. math::
current\_epoch=floor(\frac{i}{step\_per\_epoch})
.. math::
tmp\_decay\_epoch = decay\_epoch
If `update_decay_epoch` is true, update the value of `tmp_decay_epoch` every epoch. The formula is:
.. math::
tmp\_decay\_epoch = decay\_epoch * ceil(current\_epoch / decay\_epoch)
Args:
learning_rate (float): The initial value of learning rate.
end_learning_rate (float): The end value of learning rate.
total_step (int): The total number of steps.
step_per_epoch (int): The number of steps in per epoch.
decay_epoch (int): A value used to calculate decayed learning rate.
power (float): A value used to calculate decayed learning rate. This parameter must be greater than 0.
update_decay_epoch (bool): If true, update `decay_epoch`. Default: False.
Returns:
list[float]. The size of list is `total_step`.
Examples:
>>> learning_rate = 0.1
>>> end_learning_rate = 0.01
>>> total_step = 6
>>> step_per_epoch = 2
>>> decay_epoch = 2
>>> power = | |
5634195.507417285,
6253461.142118783, 11.538003087103846, 0.015129553230613428, 1.646211033468178e-06],
[1115.400748739093, 1265.5877782356972, 1428.3010571928912, 2714079.301423972, 5048177.206187649,
6403353.83122605, 11.579324789874297, 0.015321153721496901, 1.6447615608703846e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1165.821735989405, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1140.2618729167225, 1259.7696328062204, 1425.9492710629509, 2715399.6210057866, 5204539.152162562,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1140.2618729167225, 1259.7696328062204, 1425.9492710629509, 2715399.6210057866, 5204539.152162562,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1112.0739142572368, 1249.5129354938679, 1421.4288718354624, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.462388216486575, 0.014995216559648368, 1.6977428700459318e-06],
[1102.2121241384732, 1260.5869822935006, 1425.726056523609, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.6414678246117103e-06],
[1172.1322901571223, 1275.3575942303487, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1100.3826382823288, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.6414678246117103e-06],
[1112.0739142572368, 1252.4221946420748, 1421.4288718354624, 2711709.7515290566, 5281477.560804546,
6011677.073363474, 9.787151112326503, 0.014995216559648368, 1.6768806535715052e-06],
[1192.3898611011628, 1261.1135898526488, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014416923144892024, 1.746960716329272e-06],
[1172.1322901571223, 1273.6063854006757, 1424.647927352167, 2787890.321944607, 4970750.218790298,
6264496.097901617, 11.69640114007929, 0.015654224593012078, 1.7492546775003008e-06],
[1115.3671211181227, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.6726603388725022e-06],
[1114.8242474028489, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06]]
self.y_data = [[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[932.6069641175573, 400.59055218545626, 749.2704358802002],
[1219.4217619547032, 483.40873541839017, 827.8547810464862],
[590.3969996969831, 361.1751758113917, 897.3239825360729],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[3293.6417749350526, 4000.296651387464, 2749.9811204802127],
[2689.888797413033, 1995.423712500253, 2913.153419949472],
[3641.408349259386, 4149.027032556554, 2979.8648961738068],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[5050.042406848, 3353.631362261292, 4046.1731460186083],
[3211.138664604769, 5346.419128787817, 4918.9261583296375],
[4258.619318468715, 5989.02266715898, 5220.525994374762],
[8936.189342070667, 9965.239758350204, 7563.000813830158],
[6021.767634834487, 8903.495264146026, 7814.127457858662],
[6017.482913124648, 9878.662058584368, 9219.928973267353],
[10383.663899872603, 11692.453275702828, 9778.423392895807],
[9959.714499578859, 12100.473650714735, 10815.113656904918],
[4728.725795408572, 9833.320115679087, 11373.539981017966],
[7040.3231430446585, 11659.214148088358, 11790.152463711856],
[6625.1594545319385, 10968.926234932243, 12625.282931427295],
[15642.10277555957, 16893.988266075045, 14863.564593947294],
[18607.931674621763, 19082.778723217332, 15013.161810006111],
[10626.98560266889, 14816.17950387497, 15506.383314966086],
[19339.61783872236, 19682.730138362174, 15674.580866406783],
[22189.138934738672, 21973.758051439312, 17867.900673260734],
[23690.59769921251, 25783.93771324694, 20809.23996609462],
[14967.7196505778, 22248.082939376614, 22731.8285500197],
[23423.407106244136, 28591.62813777676, 26664.35093389845],
[19075.645612367698, 27207.437259519545, 28128.538674419888],
[29997.930337349753, 33405.527835033296, 28873.62124640369],
[24820.89628524226, 33586.311728094224, 31719.51145267233],
[37282.62405854246, 38046.09208233556, 33387.387843928715],
[28410.627000789853, 35686.40139112738, 36220.30055895746],
[40471.242966725076, 43007.140870292154, 37868.621210244215],
[37398.14699282434, 42565.10811820573, 37870.35351039514],
[45479.57029289774, 47732.36757601205, 41595.563139189384],
[32552.29129704946, 44403.59499708624, 45751.504789978455],
[45786.72576366185, 50791.11058567032, 48223.58538251199],
[51020.88136265659, 54357.122848508094, 50465.40361704311],
[61881.73733921197, 59644.849846935205, 51400.45602940709],
[60457.47777367473, 60647.47618547739, 51432.39988001411],
[62646.79677225086, 58733.17045634589, 53097.700319228185],
[71247.0421208742, 72098.19782720233, 67199.0151309894],
[63957.85300621107, 77501.03448860586, 81158.49902713434],
[93260.27723239115, 94410.9579982488, 86854.8936256963],
[69247.51335865381, 84142.67184010833, 89262.52599409055],
[96023.63022667098, 99075.37552357498, 97413.0256048264],
[115089.56740480401, 109259.95186664716, 99354.46311566826],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[23.975982777695172, 6.757366123987301, 191.0233853494245],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[3293.5437591554783, 130.18670022608515, 25.37306097654482],
[932.6069641175573, 400.59055218545626, 749.2704358802002],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[590.3969996969831, 361.1751758113917, 897.3239825360729],
[1195.5075793930594, 526.1504936826086, 932.2775739448363],
[615.7980966174894, 2371.6328956458747, 2197.19994390353],
[84.94494447429759, 2139.8419082286405, 3265.87568834472],
[1219.4217619547032, 483.40873541839017, 827.8547810464862],
[2724.5059366381524, 3420.7700687544716, 2292.325941125391],
[2144.00329464219, 4811.868522654193, 4324.293371510733],
[2689.888797413033, 1995.423712500253, 2913.153419949472],
[3227.723554235312, 3931.1954332489513, 2694.6979836887685],
[3102.5415684431687, 5156.512214548655, 4696.911509918105],
[5050.042406848, 3353.631362261292, 4046.1731460186083],
[3211.138664604769, 5346.419128787817, 4918.9261583296375],
[3293.6417749350526, 4000.296651387464, 2749.9811204802127],
[3636.4519255889963, 7701.036499127755, 8649.018536861058],
[3641.408349259386, 4149.027032556554, 2979.8648961738068],
[7094.934117160581, 4963.715116596285, 5681.255547927741],
[4258.619318468715, 5989.02266715898, 5220.525994374762],
[4728.725795408572, 9833.320115679087, 11373.539981017966],
[6021.767634834487, 8903.495264146026, 7814.127457858662],
[8936.189342070667, 9965.239758350204, 7563.000813830158],
[5108.537042399923, 8846.416734250804, 8264.988876899557],
[6017.482913124648, 9878.662058584368, 9219.928973267353],
[6749.343731353359, 9786.717933043677, 8863.214695950994],
[8699.036586466838, 11186.733448158204, 9991.249310163385],
[9685.64062202062, 10986.969888115693, 9156.021071890882],
[6854.662907936784, 11100.471712774037, 10984.746603997097],
[6625.1594545319385, 10968.926234932243, 12625.282931427295],
[7040.3231430446585, 11659.214148088358, 11790.152463711856],
[9955.857397038104, 11269.85863024913, 9404.157063167368],
[10383.663899872603, 11692.453275702828, 9778.423392895807],
[9959.714499578859, 12100.473650714735, 10815.113656904918],
[7855.450025109316, 13741.228884635286, 13987.2032403042],
[9539.580737744978, 15726.640872161857, 18121.942676224597],
[15272.943154076545, 16049.89530306775, 12544.873510185207],
[10626.98560266889, 14816.17950387497, 15506.383314966086],
[15642.10277555957, 16893.988266075045, 14863.564593947294],
[11972.885691299613, 16740.618540004456, 15604.373301592324],
[14967.7196505778, 22248.082939376614, 22731.8285500197],
[18607.931674621763, 19082.778723217332, 15013.161810006111],
[15600.764007884087, 20229.29363534731, 19865.958258264185],
[17299.3462527832, 22185.51377411193, 20908.129208334474],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[23.975982777695172, 6.757366123987301, 191.0233853494245],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[0.8807537900138819, 308.8706699734264, 244.54305628933315],
[348.7682075032219, 79.03123825619325, 251.45129387305434],
[3293.5437591554783, 130.18670022608515, 25.37306097654482],
[222.55731422260303, 146.9065009953099, 554.3482836092844],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[875.379542803012, 99.28324447428258, 466.83123685949204],
[932.6069641175573, 400.59055218545626, 749.2704358802002],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[590.3969996969831, 361.1751758113917, 897.3239825360729],
[419.4517590701555, 1833.403663275858, 1907.3039740444506],
[666.5041189662938, 369.17520959179217, 852.6852888192585],
[1195.5075793930594, 526.1504936826086, 932.2775739448363],
[615.7980966174894, 2371.6328956458747, 2197.19994390353],
[84.94494447429759, 2139.8419082286405, 3265.87568834472],
[1219.4217619547032, 483.40873541839017, 827.8547810464862],
[806.4250159418375, 2573.897296351246, 2788.011669077916],
[2189.475081270695, 1751.4863945013165, 2779.906539602099],
[1511.207677762512, 2088.7074393216253, 1254.3211433445492],
[2724.5059366381524, 3420.7700687544716, 2292.325941125391],
[2144.00329464219, 4811.868522654193, 4324.293371510733],
[2265.587989319462, 4618.719697500087, 3911.8825835162193],
[3337.817233042934, 2204.27579141391, 2869.87751326041],
[2689.888797413033, 1995.423712500253, 2913.153419949472],
[2850.780079119669, 3539.006290856695, 2384.2371130748134],
[5050.042406848, 3353.631362261292, 4046.1731460186083],
[3102.5415684431687, 5156.512214548655, 4696.911509918105],
[3227.723554235312, 3931.1954332489513, 2694.6979836887685],
[3155.6670736396673, 6458.660214702686, 6019.507672510742],
[3293.6417749350526, 4000.296651387464, 2749.9811204802127],
[3211.138664604769, 5346.419128787817, 4918.9261583296375],
[3333.657830110825, 4036.580611849707, 2778.6004285400895],
[3636.4519255889963, 7701.036499127755, 8649.018536861058],
[3641.408349259386, 4149.027032556554, 2979.8648961738068],
[3881.6456447322603, 4524.469986396184, 3141.099951069475],
[3641.863977178802, 7708.587433808557, 8656.844594135235],
[7094.934117160581, 4963.715116596285, 5681.255547927741],
[4028.4909074635952, 7529.888366003115, 7053.717520267663],
[3904.834571608711, 8901.031785138372, 10520.56571402266],
[4258.619318468715, 5989.02266715898, 5220.525994374762],
[4714.951634369974, 7990.049683599002, 7473.374103872339],
[4896.420563119286, 7895.896204411419, 7067.661070062295],
[8830.145285436984, 6453.843953175076, 7234.323435982808],
[4728.725795408572, 9833.320115679087, 11373.539981017966],
[5108.537042399923, 8846.416734250804, 8264.988876899557],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[1698.424036289286, 0.01299795111754957, 168.37538497510388],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[10.666841821962958, 0.10829104953011984, 120.397148384552],
[1725.6082432411808, 14.39254270781067, 32.963287653975904],
[24.29419694458095, 6.387867606153993, 188.9083418160392],
[0.8807537900138819, 308.8706699734264, 244.54305628933315],
[3293.5437591554783, 130.18670022608515, 25.37306097654482],
[23.975982777690717, 6.757366123987301, 191.0233853494245],
[23.975982777695172, 6.757366123987301, 191.0233853494245],
[3614.1945490081284, 403.56408090706645, 64.64230859699205],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[49.500236767375256, 748.8184597207388, 1194.8425780038576],
[59.29231378399418, 73.37614553766984, 507.52095741347256],
[273.34395127563, 47.55919777853988, 219.62646505048573],
[231.49253129291893, 59.0836955422837, 264.88592492996935],
[28.066275586976047, 763.7119287944752, 825.5639218708797],
[222.55731422260303, 146.9065009953099, 554.3482836092844],
[121.64342076581877, 120.4858584700321, 595.5160139781838],
[348.7682075032219, 79.03123825619325, 251.45129387305434],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[451.4713618411107, 250.38998334942517, 710.4584995815518],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[875.379542803012, 99.28324447428258, 466.83123685949204],
[103.35966736125393, 1054.0901300295363, 1140.2307760836932],
[932.6069641175573, 400.59055218545626, 749.2704358802002],
[573.8775186027166, 348.50456444541703, 878.1386230306048],
[84.94494447429759, 2139.8419082285986, 3265.87568834472],
[2215.8745136846023, 497.1308621557844, 469.140767742917],
[372.330578765052, 1872.1123080729162, 1732.8198568681098],
[419.4517590701555, 1833.403663275858, 1907.3039740444506],
[666.5041189662938, 369.17520959179217, 852.6852888192585],
[84.94494447429759, 2139.8419082286405, 3265.87568834472],
[590.3969996969831, 361.1751758113917, 897.3239825360729],
[1219.4217619547032, 483.40873541839017, 827.8547810464862],
[156.00522571860756, 2373.563916579705, 3498.689531915568],
[1195.5075793930594, 526.1504936826086, 932.2775739448363],
[645.5498562605851, 401.99901838595474, 957.277825073818],
[615.7980966174894, 2371.6328956458747, 2197.19994390353],
[1272.8274396706872, 656.4073205730202, 1099.5969877098196],
[1049.756553208089, 2433.9776637581167, 2272.2434364372602],
[686.8351067732438, 2487.4607641016514, 2300.204812129034],
[1253.8496497005235, 844.2912509695493, 1537.8776451832111],
[806.4250159418375, 2573.897296351246, 2788.011669077916],
[2189.475081270695, 1751.4863945013165, 2779.906539602099],
[2287.613789100325, 1348.6367817905837, 1878.2952537142526],
[1511.207677762512, 2088.7074393216253, 1254.3211433445492],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[1698.424036289286, 0.01299795111754957, 168.37538497510388],
[1731.6323778993533, 16.43284128682213, 28.548313164582687],
[10.666841821962958, 0.10829104953011984, 120.397148384552],
[1725.6082432411808, 14.39254270781067, 32.963287653975904],
[142.6784616644594, 34.32191793803289, 17.293611825289027],
[30.995120357671627, 1.129181305210288, 47.36986262786443],
[0.7095485344228267, 26.236956930188985, 21.312956679909245],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[24.29419694458095, 6.387867606153993, 188.9083418160392],
[15.857182051422825, 28.07431174700396, 40.861335425806786],
[0.8807537900138819, 308.8706699734264, 244.54305628933315],
[23.975982777695172, 6.757366123987301, 191.02338534941194],
[23.975982777690717, 6.757366123987301, 191.0233853494245],
[3293.5437591554783, 130.18670022608515, 25.37306097654482],
[41.98736182975695, 4.393729977420127, 167.7678514646283],
[184.7742346627305, 25.47323030257608, 170.68879273981022],
[85.44346461306462, 268.5879004525793, 69.17680850300846],
[3614.1945490081284, 403.56408090706645, 64.64230859699205],
[21.754435630673875, 715.2488256866449, 764.9568460498299],
[23.975982777695172, 6.757366123987301, 191.0233853494245],
[28.066275586976047, 763.7119287944752, 825.5639218708797],
[49.500236767375256, 748.8184597207388, 1194.8425780038576],
[59.29231378399418, 73.37614553766984, 507.52095741347256],
[71.6396219959614, 9.050893401635866, 199.6203564923143],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[273.34395127563, 47.55919777853988, 219.62646505048573],
[231.49253129291893, 59.0836955422837, 264.88592492996935],
[28.069171949223385, 1003.1312033418135, 1027.3516684209444],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[121.64342076581877, 120.4858584700321, 595.5160139781838],
[348.7682075032219, 79.03123825619325, 251.45129387305434],
[221.141679359564, 145.9022039732921, 552.4093156027258],
[324.35512380912814, 159.5829028190041, 290.5371171371144],
[103.16861608217945, 1051.7319992608554, 1136.4449105738381],
[348.7682075032389, 79.03123825620942, 251.45129387306875],
[221.65251822339494, 547.7839636484961, 1144.784349669977],
[39.15158125464487, 1200.4924713762389, 1329.7118311068864],
[222.55731422260303, 146.9065009953099, 554.3482836092844],
[39.14468497062805, 1380.7260489195446, 2655.37490097492],
[239.66160535506611, 159.67815422239494, 577.843151942103],
[2119.566971197377, 470.2314330348149, 454.9514202697302],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[785.003281477448, 130.86712665484018, 530.7839050668333],
[103.35966736125393, 1054.0901300295363, 1140.2307760836932],
[875.379542803012, 99.28324447428258, 466.83123685949204],
[94.37644003738922, 2139.4004585170637, 3223.4173407953876],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[1698.424036289286, 0.01299795111754957, 168.37538497510388],
[10.666841821962958, 0.10829104953011984, 120.397148384552],
[203.594770917816, 19.824142184046142, 11.293382524271141],
[0.0010290730588207357, 12.411000033390046, 42.754791797188105],
[142.6784616644594, 34.32191793803289, 17.293611825289027],
[325.5962494985258, 2.0637405787549548, 5.210879374044886],
[0.7095485344228267, 26.236956930188985, 21.312956679909245],
[0.7835586192627262, 1.676351289965749, 100.34701702002148],
[3.5731860780187756, 53.16877490582369, 3.009071202302619],
[30.995120357671627, 1.129181305210288, 47.36986262786443],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1533.6139300255934, 14.159421830163234, 26.415486450323698],
[24.29419694458095, 6.387867606153993, 188.9083418160392],
[15.857182051422825, 28.07431174700396, 40.861335425806786],
[153.2434867525576, 7.000938846496265, 113.48710702569375],
[1653.3882605950482, 8.670579299028148, 53.8653020856373],
[23.975982777695172, 6.757366123987301, 191.02338534941194],
[0.8440182840134394, 80.34587155530498, 1.0382882544643537],
[23.975982777690717, 6.757366123987301, 191.0233853494245],
[41.98736182975695, 4.393729977420127, 167.7678514646283],
[1679.8161835319363, 11.843501245199953, 35.4908477026538],
[1731.6323778993533, 16.43284128682213, 28.548313164582687],
[1731.6323778993533, 16.43284128682213, 28.548313164582687],
[0.8807537900138819, 308.8706699734264, 244.54305628933315],
[2737.591321461413, 112.62471052602751, 6.859757602699076],
[184.7742346627305, 25.47323030257608, 170.68879273979834],
[1725.6082432411808, 14.39254270781067, 32.963287653975904],
[5.775639905375375, 190.72659909664773, 76.81780388215864],
[1082.1781362040533, 13.305516743552916, 171.28408521191926],
[7.106795994094727, 217.37001170891475, 36.21695057138824],
[21.31264283001281, 157.55891288396842, 21.479461987177856],
[2104.212181935844, 5.23347954486414, 138.81063798812713],
[23.975982777695172, 6.757366123987301, 191.0233853494245],
[184.7742346627305, 25.47323030257608, 170.68879273981022],
[85.44346461306462, 268.5879004525793, 69.17680850300846],
[2288.359976084589, 28.994927748993238, 68.79006264030811],
[21.754435630673875, 715.2488256866449, 764.9568460498299],
[23.975982777699624, 6.757366123989664, 191.0233853494245],
[3293.5437591554783, 130.18670022608515, 25.37306097654482],
[28.783696784989797, 9.384950799991389, 203.82246493282764],
[25.08181172238115, 13.693906521142155, 241.73334424006276],
[3489.7925919810664, 367.75052437072355, 52.34093336755523],
[71.6396219959614, 9.050893401635866, 199.6203564923143],
[34.520498791605654, 326.57694614810976, 418.9483490110867],
[28.066275586976047, 763.7119287944752, 825.5639218708797],
[31.923406125744755, 489.4423110958731, 605.4021271616411],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[1698.424036289286, 0.01299795111754957, 168.37538497510388],
[142.6784616644594, 34.32191793803289, 17.293611825289027],
[0.5772211124468531, 74.2102600709729, 0.30155031314624153],
[0.6424484977527564, 26.608086273080183, 20.989619496872145],
[0.0008582001632676055, 69.44727686411046, 0.3071014948342446],
[26.13058550809608, 2.141480924070444, 42.10217361371894],
[203.594770917816, 19.824142184046142, 11.293382524271141],
[30.995120357671627, 1.129181305210288, 47.36986262786443],
[10.666841821962958, 0.10829104953011984, 120.397148384552],
[0.0010290730588207357, 12.411000033390046, 42.754791797188105],
[1595.095376997128, 0.0610148161277292, 150.2366866729055],
[207.6312233512305, 15.22131442465849, 6.291022749656092],
[325.5962494985258, 2.0637405787549548, 5.210879374044886],
[3.5731860780187756, 53.16877490582369, 3.009071202302619],
[11.29966006255687, 0.5830898866905679, 104.71988885779116],
[0.7835586192627262, 1.676351289965749, 100.34701702002148],
[3.20923396464739, 0.9783319000006608, 97.54501629398654],
[0.7095485344228267, 26.236956930188985, 21.312956679909245],
[4.278577793961269, 15.360752799924295, 33.54765088157793],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[7.045007797487232, 18.17242417586246, 51.9346785536019],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1679.762645123694, 1.0718170703870942, 208.28785635417034],
[153.2434867525576, 7.000938846496265, 113.48710702569375],
[15.857182051422825, 28.07431174700396, 40.861335425806786],
[17.541944891867267, 3.832153772764636, 174.53972273700833],
[0.05315462890899393, 138.61630888519258, 46.322536093479506],
[1679.8161835319363, 11.843501245199953, 35.4908477026538],
[29.745105957679694, 2.7021323444944745, 166.9661513268865],
[4.804381395324434, 5.799601703728597, 236.36883796813848],
[0.8236638930928141, 80.21190969293276, 1.0224442381977954],
[224.6297101975616, 19.77992966447502, 14.944769219789107],
[1925.8072004787518, 38.88394581165614, 10.644278459309703],
[35.10481877686825, 66.5769325905058, 11.834380520576785],
[1656.9560860562453, 9.742185462390303, 49.871561672973684],
[44.436501237597625, 55.3183631663894, 23.14432518736778],
[1533.6139300255934, 14.159421830163234, 26.415486450323698],
[1653.3882605950482, 8.670579299028148, 53.8653020856373],
[1731.6323778993533, 16.43284128682213, 28.548313164582687],
[180.85857012332116, 13.460337053275351, 135.23383236174564],
[1725.6082432411808, 14.39254270781067, 32.963287653975904],
[0.8440182840134394, 80.34587155530498, 1.0382882544643537],
[21.27072387211216, 5.445982970611186, 183.9721434425518],
[1731.6323778993533, 16.43284128682213, 28.548313164582687],
[2104.212181935844, 5.23347954486414, 138.81063798812713],
[41.98736182975695, 4.393729977420127, 167.7678514646283],
[184.7742346627305, 25.47323030257608, 170.68879273979834],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[1698.4240362893236, 0.01299795111744588, 168.37538497509206],
[1698.424036289286, 0.01299795111754957, 168.37538497510388],
[202.35113541078235, 17.063449150127163, 7.426857106060623],
[207.6312233512305, 15.22131442465849, 6.291022749656092],
[0.5772211124468531, 74.2102600709729, 0.30155031314624153],
[0.0008582001632676055, 69.44727686411046, 0.3071014948342446],
[30.995120357671627, 1.129181305210288, 47.36986262786443],
[325.5962494985258, 2.0637405787549548, 5.210879374044886],
[11.29966006255687, 0.5830898866905679, 104.71988885779116],
[0.04022760342216386, 4.175523328298983, 83.98280887651758],
[1595.095376997128, 0.0610148161277292, 150.2366866729055],
[271.78999830670466, 8.31837941503812, 5.23694910379756],
[0.0010290730588207357, 12.411000033390046, 42.754791797188105],
[0.7866796241855081, 0.7482338284857643, 126.95937544720896],
[10.666841821962958, 0.10829104953011984, 120.397148384552],
[21.513257606901835, 10.778569167839118, 10.748948298837892],
[3.5731860780187756, 53.16877490582369, 3.009071202302619],
[242.04472754817434, 10.71498387628563, 4.9484867291264365],
[1.9860473235411782, 19.190032496109808, 30.48515479005733],
[19.35606118638945, 4.205685765701538, 34.632844709404786],
[0.7835586192627262, 1.676351289965749, 100.34701702002148],
[26.13058550809608, 2.141480924070444, 42.10217361371894],
[0.03724473476969815, 33.41040897798797, 10.286782340230328],
[3.20923396464739, | |
# Repository: Mehul343/discord.py-self
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
from base64 import b64encode
import json
import logging
from random import choice
from typing import (
Any,
ClassVar,
Coroutine,
Dict,
Iterable,
List,
Optional,
Sequence,
TYPE_CHECKING,
Type,
TypeVar,
Union,
)
from urllib.parse import quote as _uriquote
import weakref
import aiohttp
from .enums import RelationshipAction, InviteType
from .errors import HTTPException, Forbidden, NotFound, LoginFailure, DiscordServerError, InvalidArgument
from . import utils
from .tracking import ContextProperties
from .utils import MISSING
_log = logging.getLogger(__name__)
if TYPE_CHECKING:
from .file import File
from .enums import AuditLogAction, ChannelType
from .message import Message
from .types import (
appinfo,
audit_log,
channel,
emoji,
guild,
integration,
invite,
member,
message,
template,
role,
user,
webhook,
widget,
team,
threads,
sticker,
welcome_screen,
)
from .types.snowflake import Snowflake, SnowflakeList
from types import TracebackType
# Generic helpers used throughout this module.
T = TypeVar('T')
# Exception type threaded through MaybeUnlock.__exit__.
BE = TypeVar('BE', bound=BaseException)
# Self-type returned by MaybeUnlock.__enter__.
MU = TypeVar('MU', bound='MaybeUnlock')
# Shorthand for the coroutine that every HTTP call returns.
Response = Coroutine[Any, Any, T]
async def json_or_text(response: aiohttp.ClientResponse) -> Union[Dict[str, Any], str]:
    """Return the response body parsed as JSON when the server declares JSON,
    otherwise the raw text.

    The media type is compared without its parameters, so a header such as
    ``application/json; charset=utf-8`` is still recognised as JSON (the
    original exact-equality check would have returned it as plain text).
    A missing ``content-type`` header falls back to text.
    """
    text = await response.text(encoding='utf-8')
    try:
        # Compare only the media type; ignore any ";charset=..." parameters.
        if response.headers['content-type'].split(';')[0].strip() == 'application/json':
            return utils._from_json(text)
    except KeyError:
        # Thanks Cloudflare
        pass
    return text
def _gen_accept_encoding_header():
    """Build the ``Accept-Encoding`` header value, advertising brotli only
    when aiohttp was installed with brotli support.
    """
    encodings = ['gzip', 'deflate']
    if aiohttp.http_parser.HAS_BROTLI:  # type: ignore
        encodings.append('br')
    return ', '.join(encodings)
class Route:
    """Describes a single Discord API endpoint: the HTTP method, the path
    template, and the fully formatted URL.

    The major parameters (channel, guild, webhook) are kept so requests can
    be grouped into rate-limit buckets via :attr:`bucket`.
    """

    BASE: ClassVar[str] = 'https://discord.com/api/v9'

    def __init__(self, method: str, path: str, **parameters: Any) -> None:
        self.path: str = path
        self.method: str = method
        target = self.BASE + self.path
        if parameters:
            # URL-escape string parameters before substituting them into the path.
            escaped = {}
            for key, value in parameters.items():
                escaped[key] = _uriquote(value) if isinstance(value, str) else value
            target = target.format_map(escaped)
        self.url: str = target
        # Major parameters
        self.channel_id: Optional[Snowflake] = parameters.get('channel_id')
        self.guild_id: Optional[Snowflake] = parameters.get('guild_id')
        self.webhook_id: Optional[Snowflake] = parameters.get('webhook_id')
        self.webhook_token: Optional[str] = parameters.get('webhook_token')

    @property
    def bucket(self) -> str:
        # TODO: Implement buckets :(
        return f'{self.channel_id}:{self.guild_id}:{self.path}'
class MaybeUnlock:
    """Context manager that releases *lock* on exit unless :meth:`defer` was
    called inside the ``with`` block.
    """

    def __init__(self, lock: asyncio.Lock) -> None:
        self.lock: asyncio.Lock = lock
        self._unlock: bool = True

    def __enter__(self: MU) -> MU:
        return self

    def defer(self) -> None:
        """Keep the lock held past the end of the ``with`` block."""
        self._unlock = False

    def __exit__(
        self,
        exc_type: Optional[Type[BE]],
        exc: Optional[BE],
        traceback: Optional[TracebackType],
    ) -> None:
        if not self._unlock:
            return
        self.lock.release()
# NOTE: module-level monkey-patches on aiohttp, applied once at import time.
# For some reason, the Discord voice websocket expects this header to be
# completely lowercase while aiohttp respects spec and does it as case-insensitive
aiohttp.hdrs.WEBSOCKET = 'websocket'  # type: ignore
# Advertise brotli in aiohttp's default Accept-Encoding header only when the
# installed aiohttp has brotli support (see _gen_accept_encoding_header above).
aiohttp.client_reqrep.ClientRequest.DEFAULT_HEADERS[aiohttp.hdrs.ACCEPT_ENCODING] = _gen_accept_encoding_header()  # type: ignore
class _FakeResponse:
def __init__(self, reason: str, status: int) -> None:
self.reason = reason
self.status = status
class HTTPClient:
"""Represents an HTTP client sending HTTP requests to the Discord API."""
def __init__(
self,
connector: Optional[aiohttp.BaseConnector] = None,
*,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
unsync_clock: bool = True,
) -> None:
self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop
self.connector = connector
self.__session: aiohttp.ClientSession = MISSING
self._locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
self._global_over: asyncio.Event = asyncio.Event()
self._global_over.set()
self.token: Optional[str] = None
self.ack_token: Optional[str] = None
self.proxy: Optional[str] = proxy
self.proxy_auth: Optional[aiohttp.BasicAuth] = proxy_auth
self.use_clock: bool = not unsync_clock
self.user_agent: str = MISSING
self.super_properties: Dict[str, Any] = {}
self.encoded_super_properties: str = MISSING
self._started: bool = False
def __del__(self) -> None:
    # Best-effort cleanup when the client is garbage collected. An async
    # close is impossible in __del__, so this pokes aiohttp's private
    # synchronous `connector._close()`; AttributeError is swallowed in case
    # the session was never created or the private API changes.
    session = self.__session
    if session:
        try:
            session.connector._close()
        except AttributeError:
            pass
async def startup(self) -> None:
    """Lazily initialize the HTTP session and the client fingerprint.

    Creates the aiohttp session, fetches the user agent / browser version /
    client build number via ``utils._get_info`` (a network call — presumably
    it scrapes the current official client build; confirm against the utils
    implementation), then builds and base64-encodes the X-Super-Properties
    payload Discord expects from an official client.

    Idempotent: once ``self._started`` is set, subsequent calls return
    immediately.
    """
    if self._started:
        return
    self.__session = session = aiohttp.ClientSession(connector=self.connector)
    self.user_agent, self.browser_version, self.client_build_number = ua, bv, bn = await utils._get_info(session)
    _log.info('Found user agent %s (%s), build number %s.', ua, bv, bn)
    # Mimic the super properties an official desktop Chrome client on
    # Windows 10 would send.
    self.super_properties = sp = {
        'os': 'Windows',
        'browser': 'Chrome',
        'device': '',
        'browser_user_agent': ua,
        'browser_version': bv,
        'os_version': '10',
        'referrer': '',
        'referring_domain': '',
        'referrer_current': '',
        'referring_domain_current': '',
        'release_channel': 'stable',
        'system_locale': 'en-US',
        'client_build_number': bn,
        'client_event_source': None
    }
    self.encoded_super_properties = b64encode(json.dumps(sp).encode()).decode('utf-8')
    self._started = True
async def ws_connect(self, url: str, *, compress: int = 0, host: Optional[str] = None) -> Any:
    """Open a websocket connection to *url* with the headers Discord expects.

    When *host* is not supplied it is derived from the URL by stripping the
    ``wss://`` scheme, the query string, and any trailing slash.
    """
    if not host:
        host = url[6:].split('?')[0].rstrip('/')  # drop 'wss://' and the query params
    websocket_headers = {
        'Accept-Language': 'en-US',
        'Cache-Control': 'no-cache',
        'Connection': 'Upgrade',
        'Host': host,
        'Origin': 'https://discord.com',
        'Pragma': 'no-cache',
        'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits',
        'User-Agent': self.user_agent,
    }
    # max_msg_size=0 disables aiohttp's message size limit; autoclose is off
    # because the gateway handler manages the close handshake itself.
    return await self.__session.ws_connect(
        url,
        proxy_auth=self.proxy_auth,
        proxy=self.proxy,
        max_msg_size=0,
        timeout=30.0,
        autoclose=False,
        headers=websocket_headers,
        compress=compress,
    )
async def request(
    self,
    route: Route,
    *,
    files: Optional[Sequence[File]] = None,
    form: Optional[Iterable[Dict[str, Any]]] = None,
    **kwargs: Any,
) -> Any:
    """Perform an HTTP request against the Discord API with rate limit
    handling and up to five attempts.

    Returns the decoded JSON body (or raw text) on 2xx. Raises
    HTTPException / Forbidden / NotFound / DiscordServerError on error
    statuses, and re-raises OSError if the connection keeps resetting.
    """
    bucket = route.bucket
    method = route.method
    url = route.url

    # One lock per rate limit bucket; a bucket-less route gets a throwaway
    # lock that is never shared.
    lock = self._locks.get(bucket)
    if lock is None:
        lock = asyncio.Lock()
        if bucket is not None:
            self._locks[bucket] = lock

    # Header creation: browser-like headers matching the super properties
    # fingerprint built in startup().
    headers = {
        'Accept-Language': 'en-US',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Origin': 'https://discord.com',
        'Pragma': 'no-cache',
        'Referer': 'https://discord.com/channels/@me',
        'Sec-CH-UA': '"Google Chrome";v="{0}", "Chromium";v="{0}", ";Not A Brand";v="99"'.format(self.browser_version.split('.')[0]),
        'Sec-CH-UA-Mobile': '?0',
        'Sec-CH-UA-Platform': '"Windows"',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': self.user_agent,
        'X-Discord-Locale': 'en-US',
        'X-Debug-Options': 'bugReporterEnabled',
        'X-Super-Properties': self.encoded_super_properties
    }

    # Header modification based on keyword-only request options.
    if self.token is not None and kwargs.get('auth', True):
        headers['Authorization'] = self.token

    reason = kwargs.pop('reason', None)
    if reason:
        headers['X-Audit-Log-Reason'] = _uriquote(reason)

    # 'json' payloads are serialized here so a custom encoder can be used.
    if 'json' in kwargs:
        headers['Content-Type'] = 'application/json'
        kwargs['data'] = utils._to_json(kwargs.pop('json'))

    if 'context_properties' in kwargs:
        props = kwargs.pop('context_properties')
        if isinstance(props, ContextProperties):
            headers['X-Context-Properties'] = props.value

    # Some endpoints want the fingerprint under X-Track instead.
    if kwargs.pop('super_properties_to_track', False):
        headers['X-Track'] = headers.pop('X-Super-Properties')

    kwargs['headers'] = headers

    # Proxy support
    if self.proxy is not None:
        kwargs['proxy'] = self.proxy
    if self.proxy_auth is not None:
        kwargs['proxy_auth'] = self.proxy_auth

    # Wait out any active global rate limit before even queuing.
    if not self._global_over.is_set():
        await self._global_over.wait()

    response: Optional[aiohttp.ClientResponse] = None
    data: Optional[Union[Dict[str, Any], str]] = None
    await lock.acquire()
    with MaybeUnlock(lock) as maybe_lock:
        for tries in range(5):
            # File handles must be rewound on every retry.
            if files:
                for f in files:
                    f.reset(seek=tries)

            # FormData is single-use in aiohttp, so rebuild it per attempt.
            if form:
                form_data = aiohttp.FormData(quote_fields=False)
                for params in form:
                    form_data.add_field(**params)
                kwargs['data'] = form_data

            try:
                async with self.__session.request(method, url, **kwargs) as response:
                    _log.debug('%s %s with %s has returned %s.', method, url, kwargs.get('data'), response.status)
                    data = await json_or_text(response)

                    # Check if we have rate limit information
                    remaining = response.headers.get('X-Ratelimit-Remaining')
                    if remaining == '0' and response.status != 429:
                        # We've depleted our current bucket: keep the lock
                        # held (defer) and schedule its release for when the
                        # bucket resets.
                        delta = utils._parse_ratelimit_header(response, use_clock=self.use_clock)
                        _log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta)
                        maybe_lock.defer()
                        self.loop.call_later(delta, lock.release)

                    # Request was successful so just return the text/json
                    if 300 > response.status >= 200:
                        _log.debug('%s %s has received %s', method, url, data)
                        return data

                    # Rate limited
                    if response.status == 429:
                        # A 429 without the 'Via' header (or a non-JSON body)
                        # did not come from Discord's gateway proxy.
                        if not response.headers.get('Via') or isinstance(data, str):
                            # Banned by Cloudflare more than likely.
                            raise HTTPException(response, data)

                        fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket "%s".'

                        # Sleep a bit
                        # NOTE(review): assumes every proxied 429 JSON body
                        # carries 'retry_after'; a KeyError here would be
                        # unhandled — confirm against the API contract.
                        retry_after: float = data['retry_after']
                        _log.warning(fmt, retry_after, bucket)

                        # Check if it's a global rate limit
                        is_global = data.get('global', False)
                        if is_global:
                            _log.warning('Global rate limit has been hit. Retrying in %.2f seconds.', retry_after)
                            self._global_over.clear()

                        await asyncio.sleep(retry_after)
                        _log.debug('Done sleeping for the rate limit. Retrying...')

                        # Release the global lock now that the rate limit passed
                        if is_global:
                            self._global_over.set()
                            _log.debug('Global rate limit is now over.')

                        continue

                    # Unconditional retry with linear backoff on transient
                    # server/gateway errors.
                    if response.status in {500, 502, 504}:
                        await asyncio.sleep(1 + tries * 2)
                        continue

                    # Usual error cases
                    if response.status == 403:
                        raise Forbidden(response, data)
                    elif response.status == 404:
                        raise NotFound(response, data)
                    elif response.status >= 500:
                        raise DiscordServerError(response, data)
                    else:
                        raise HTTPException(response, data)

            # This is handling exceptions from the request
            except OSError as e:
                # Connection reset by peer (54 = BSD/macOS, 10054 = Windows).
                if tries < 4 and e.errno in (54, 10054):
                    await asyncio.sleep(1 + tries * 2)
                    continue
                raise

        # We've run out of retries, raise whatever the last response was.
        if response is not None:
            if response.status >= 500:
                raise DiscordServerError(response, data)

            raise HTTPException(response, data)

        raise RuntimeError('Unreachable code in HTTP handling')
async def get_from_cdn(self, url: str) -> bytes:
async with self.__session.get(url) as resp:
if resp.status | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.27338,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 9.20538,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 2.83407e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202691,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.77103e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.502556,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.810604,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.409166,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.72233,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.574776,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.94809,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.34585e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0210794,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.152432,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.155895,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.152435,
'Execution Unit/Register Files/Runtime Dynamic': 0.176975,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.321132,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.10139,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 3.60876,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00104847,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00104847,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000909685,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00035022,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00223945,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00524608,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.010179,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.149866,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.326579,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.509013,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96396,
'Instruction Fetch Unit/Runtime Dynamic': 1.00088,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0420631,
'L2/Runtime Dynamic': 0.0286877,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 8.00094,
'Load Store Unit/Data Cache/Runtime Dynamic': 3.28873,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.218826,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.218826,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 9.03428,
'Load Store Unit/Runtime Dynamic': 4.58672,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.539586,
'Load Store Unit/StoreQ/Runtime Dynamic': 1.07917,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.191501,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.192129,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0535475,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.885069,
'Memory Management Unit/Runtime Dynamic': 0.245677,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 27.4629,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 9.16521e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.022674,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.268495,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
last_batch = self.key_value
self.list_docs = list(self.__chunks(self.all_keys, Atomicity.num_docs))
for op_type in self.op_type:
if op_type == "create":
if len(self.op_type) != 1:
commit = True
else:
commit = self.commit
docs = list(self.__chunks(docs, Atomicity.num_docs))
for doc in docs:
err = Transaction().RunTransaction(self.transaction, self.bucket, doc, [], [], commit, Atomicity.sync, Atomicity.updatecount )
if err:
exception = self.__retryDurabilityImpossibleException(err, doc, commit, op_type="create")
if exception:
break
if not commit:
self.all_keys = []
if op_type == "update" or op_type == "rebalance_only_update":
for doc in self.list_docs:
err = Transaction().RunTransaction(self.transaction, self.bucket, [], doc, [], self.commit, Atomicity.sync, Atomicity.updatecount )
if err and not Atomicity.record_fail:
exception = self.__retryDurabilityImpossibleException(err, doc, self.commit, op_type="update")
if exception:
break
if self.commit:
self.update_keys = self.all_keys
if op_type == "update_Rollback":
exception = Transaction().RunTransaction(self.transaction, self.bucket, [], self.update_keys, [], False, True, Atomicity.updatecount )
if op_type == "delete" or op_type == "rebalance_delete":
for doc in self.list_docs:
err = Transaction().RunTransaction(self.transaction, self.bucket, [], [], doc, self.commit, Atomicity.sync, Atomicity.updatecount )
if err:
exception = self.__retryDurabilityImpossibleException(err, doc, self.commit, op_type="delete")
if exception:
break
if self.commit:
self.delete_keys = self.all_keys
if op_type == "general_update":
for client in Atomicity.clients:
self.batch_update(last_batch, client, persist_to=self.persist_to, replicate_to=self.replicate_to,
timeout=self.timeout, time_unit=self.time_unit, doc_type=self.generator.doc_type)
self.update_keys = last_batch.keys()
if op_type == "general_delete":
self.test_log.debug("performing delete for keys {}".format(last_batch.keys()))
for client in Atomicity.clients:
keys = self.batch_delete(last_batch, client, persist_to=self.persist_to,
replicate_to=self.replicate_to,
timeout=self.timeout,
timeunit=self.time_unit,
durability="")
self.delete_keys = last_batch.keys()
if op_type == "rebalance_update" or op_type == "create_update":
self.update_keys = self.all_keys
err = Transaction().RunTransaction(self.transaction, self.bucket, docs, self.all_keys, [], self.commit, True, Atomicity.updatecount)
if err and not Atomicity.record_fail:
exception = self.__retryDurabilityImpossibleException(err, docs, self.commit, op_type="create", update_keys=self.all_keys)
elif err:
self.all_keys = []
if op_type == "time_out":
err = Transaction().RunTransaction(self.transaction, self.bucket, docs, [], [], True, True, Atomicity.updatecount )
if "AttemptExpired" in str(err):
self.test_log.info("Transaction Expired as Expected")
for line in err:
self.test_log.info("{}".format(line))
self.test_log.debug("End of First Transaction that is getting timeout")
else:
exception = err
time.sleep(60)
if exception and not Atomicity.record_fail:
self.set_exception(Exception(exception))
break
self.test_log.info("Atomicity Load generation thread completed")
self.inserted_keys = {}
for client in Atomicity.clients:
self.inserted_keys[client] = []
self.inserted_keys[client].extend(self.all_keys)
self.test_log.info("Starting Atomicity Verification thread")
self.process_values_for_verification(self.keys_values)
for client in Atomicity.clients:
result_map = self.batch_read(self.keys_values.keys(), client)
wrong_values = self.validate_key_val(result_map[0], self.keys_values, client)
if wrong_values:
self.set_exception("Wrong key value. "
"Wrong key value: {}"
.format(','.join(wrong_values)))
for key in self.delete_keys:
if key in self.inserted_keys[client]:
self.inserted_keys[client].remove(key)
if self.inserted_keys[client] and "time_out" not in self.op_type:
self.set_exception("Keys were missing. "
"Keys missing: {}"
.format(','.join(self.inserted_keys[client])))
self.test_log.info("Completed Atomicity Verification thread")
self.complete_task()
def __chunks(self, l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter)."""
    start = 0
    total = len(l)
    while start < total:
        yield l[start:start + n]
        start += n
def __retryDurabilityImpossibleException(self, err, docs, commit, op_type="update", update_keys=None):
    """Retry a transaction up to 5 times (30s apart) while the error string
    contains DurabilityImpossibleException.

    :param err: error returned by the previous RunTransaction call
    :param docs: the docs the failed operation was applied to
    :param commit: whether the transaction should commit
    :param op_type: "create", or anything containing "update"/"delete"
    :param update_keys: keys to pass as the update set for "create" retries
    :return: the error from the final attempt (falsy on success)
    """
    # FIX: 'update_keys=[]' was a mutable default argument shared across
    # calls; use the None sentinel idiom instead.
    if update_keys is None:
        update_keys = []
    if "DurabilityImpossibleException" in str(err):
        self.test_log.info("DurabilityImpossibleException seen so retrying")
        attempts = 5
        while attempts > 0:
            attempts -= 1
            time.sleep(30)
            # Note: op_type branches are not mutually exclusive on purpose —
            # e.g. "rebalance_only_update" matches the "update" substring.
            if op_type == "create":
                err = Transaction().RunTransaction(self.transaction, self.bucket, docs, update_keys, [], commit, Atomicity.sync, Atomicity.updatecount)
            if "update" in op_type:
                err = Transaction().RunTransaction(self.transaction, self.bucket, [], docs, [], commit, Atomicity.sync, Atomicity.updatecount)
            if "delete" in op_type:
                err = Transaction().RunTransaction(self.transaction, self.bucket, [], [], docs, commit, Atomicity.sync, Atomicity.updatecount)
            if "DurabilityImpossibleException" in str(err):
                self.test_log.info("DurabilityImpossibleException seen so retrying")
            else:
                break
    return err
def validate_key_val(self, map, key_value, client):
    """Compare expected key/value pairs against the results of a batch read.

    Removes matched keys from self.inserted_keys[client] and returns the
    list of keys whose stored value did not match the expectation.

    :param map: batch-read result, key -> {'cas', 'value', 'error'}
              (NOTE(review): shadows the builtin 'map'; kept to preserve
              the signature)
    :param key_value: expected key -> JSON-string value mapping
    :param client: SDK client used as the index into self.inserted_keys
    """
    wrong_values = []
    for key, value in key_value.items():
        if key in map:
            # NOTE(review): elsewhere in this class self.op_type is iterated
            # as a collection, so this equality against a plain string may
            # never be true — confirm which type callers actually pass.
            if self.op_type == "time_out":
                expected_val = {}
            else:
                expected_val = Json.loads(value)
            actual_val = {}
            # cas == 0 means the document does not exist on the server.
            if map[key]['cas'] != 0:
                actual_val = Json.loads(map[key]['value'].toString())
            elif map[key]['error'] is not None:
                actual_val = map[key]['error'].toString()
            # A match, or a missing document, both count as "accounted for".
            if expected_val == actual_val or map[key]['cas'] == 0:
                self.inserted_keys[client].remove(key)
            else:
                wrong_values.append(key)
                self.test_log.info("actual value for key {} is {}".format(key,actual_val))
                self.test_log.info("expected value for key {} is {}".format(key,expected_val))
    return wrong_values
def process_values_for_verification(self, key_val):
    """Rewrite expected values in-place for keys that were updated.

    For every key in self.update_keys (or all keys when op_type is
    "verify"), bump the 'mutated' counter in the JSON document by
    Atomicity.updatecount. Values that are not valid JSON get a single
    deterministic character substitution (seeded by the key) instead.
    """
    for key, value in key_val.items():
        # Guard clause: untouched keys keep their original expected value.
        if key not in self.update_keys and self.op_type != "verify":
            continue
        try:
            # Re-read so we mutate the latest stored value (this differs
            # from the original LoadDocumentsTask, which did not refresh).
            value = key_val[key]
            doc = Json.loads(value)
            doc['mutated'] += Atomicity.updatecount
            value = Json.dumps(doc)
        except ValueError:
            # Non-JSON value: deterministic single-char tweak, seeded by key.
            self.random.seed(key)
            index = self.random.choice(range(len(value)))
            value = value[0:index] + self.random.choice(string.ascii_uppercase) + value[index + 1:]
        finally:
            key_val[key] = value
class MonitorViewFragmentationTask(Task):
    """
    Attempt to monitor fragmentation that is occurring for a given design_doc.

    The execute stage is just for preliminary sanity checking of values and
    environment. The check function looks at the index file across all nodes
    and attempts to calculate the total fragmentation occurring by the views
    within the design_doc.

    Note: If autocompaction is enabled and the user attempts to monitor for a
    fragmentation value higher than the level at which auto_compaction kicks
    in, a warning is logged, as this can lead to infinite monitoring.
    """

    def __init__(self, server, design_doc_name, fragmentation_value=10, bucket="default"):
        """
        :param server: node used for REST calls
        :param design_doc_name: design doc whose view fragmentation is watched
        :param fragmentation_value: target fragmentation percentage (0-100)
        :param bucket: bucket containing the design doc
        """
        Task.__init__(self, "monitor_frag_task")
        self.server = server
        self.bucket = bucket
        self.fragmentation_value = fragmentation_value
        self.design_doc_name = design_doc_name
        self.result = False

    def call(self):
        self.start_task()
        # Sanity check of the fragmentation value.
        # FIX: this was a leftover bare `print` statement; route the debug
        # output through the task logger instead of stdout.
        self.log.info("self.fragmentation_value: %s" % self.fragmentation_value)
        if self.fragmentation_value < 0 or self.fragmentation_value > 100:
            err_msg = "Invalid value for fragmentation %d" % self.fragmentation_value
            self.set_exception(Exception(err_msg))
        try:
            auto_compact_percentage = self._get_current_auto_compaction_percentage()
            if auto_compact_percentage != "undefined" and auto_compact_percentage < self.fragmentation_value:
                self.log.warn("Auto compaction is set to %s. Therefore fragmentation_value %s may not be reached" % (auto_compact_percentage, self.fragmentation_value))
        except GetBucketInfoFailed as e:
            self.set_exception(e)
        except Exception as e:
            self.set_exception(e)
        self.check()
        self.complete_task()

    def _get_current_auto_compaction_percentage(self):
        """Check at bucket level and cluster level for compaction percentage."""
        auto_compact_percentage = None
        rest = BucketHelper(self.server)
        content = rest.get_bucket_json(self.bucket)
        if content["autoCompactionSettings"] is False:
            # No bucket-level settings; try to read cluster level compaction settings.
            content = rest.cluster_status()
        auto_compact_percentage = content["autoCompactionSettings"]["viewFragmentationThreshold"]["percentage"]
        return auto_compact_percentage

    def check(self):
        """Poll until fragmentation exceeds the target or ~300 polls elapse.

        Sets self.result = True when the target is crossed.
        """
        rest = RestConnection(self.server)
        new_frag_value = 0
        timeout = 300
        while new_frag_value < self.fragmentation_value and timeout > 0:
            new_frag_value = MonitorViewFragmentationTask.calc_ddoc_fragmentation(
                rest, self.design_doc_name, bucket=self.bucket)
            self.log.info("%s: current amount of fragmentation = %d, \
                required: %d" % (self.design_doc_name,
                                 new_frag_value, self.fragmentation_value))
            if new_frag_value > self.fragmentation_value:
                self.result = True
                break
            timeout -= 1
            sleep(1)

    @staticmethod
    def aggregate_ddoc_info(rest, design_doc_name, bucket="default",
                            with_rebalance=False):
        """Collect set_view info for *design_doc_name* from every cluster node.

        Nodes that error out during a rebalance are tolerated when
        with_rebalance is True; other errors are re-raised.
        """
        nodes = rest.node_statuses()
        info = []
        for node in nodes:
            # NOTE(review): 'rest' is rebound per node, so credentials for
            # later nodes come from the previous RestConnection — confirm
            # this is intentional.
            server_info = {"ip": node.ip,
                           "port": node.port,
                           "username": rest.username,
                           "password": rest.password}
            rest = RestConnection(server_info)
            status = False
            try:
                status, content = rest.set_view_info(bucket, design_doc_name)
            except Exception as e:
                print(str(e))
                if "Error occured reading set_view _info" in str(e) and with_rebalance:
                    print("node {0} {1} is not ready yet?: {2}".format(
                        node.id, node.port, e.message))
                else:
                    raise e
            if status:
                info.append(content)
        return info

    @staticmethod
    def calc_ddoc_fragmentation(rest, design_doc_name, bucket="default", with_rebalance=False):
        """Return the total fragmentation percentage for the design doc,
        aggregated across all nodes: (disk - data) / disk * 100."""
        total_disk_size = 0
        total_data_size = 0
        total_fragmentation = 0
        nodes_ddoc_info = \
            MonitorViewFragmentationTask.aggregate_ddoc_info(rest,
                                                             design_doc_name,
                                                             bucket, with_rebalance)
        total_disk_size = sum([content['disk_size'] for content in nodes_ddoc_info])
        total_data_size = sum([content['data_size'] for content in nodes_ddoc_info])
        if total_disk_size > 0 and total_data_size > 0:
            total_fragmentation = \
                (total_disk_size - total_data_size) / float(total_disk_size) * 100
        return total_fragmentation
class ViewCompactionTask(Task):
"""
Executes view compaction for a given design doc. This is technicially view compaction
as represented by the api and also because the fragmentation is generated by the
keys emitted by map/reduce functions within views. Task will check that compaction
history for design doc is incremented and if any work was really done.
"""
def __init__(self, server, design_doc_name, bucket="default", with_rebalance=False):
    """Set up a compaction task for the given design document."""
    Task.__init__(self, "view_compaction_task")
    self.server = server
    self.design_doc_name = design_doc_name
    self.bucket = bucket
    self.with_rebalance = with_rebalance
    # The REST endpoint wants a URL-encoded '/' between '_design' and the name.
    self.ddoc_id = "_design%2f" + design_doc_name
    # Pre-compaction baselines, filled in by call().
    self.compaction_revision = 0
    self.precompacted_fragmentation = 0
    self.rest = RestConnection(self.server)
    self.result = False
def call(self):
    """Trigger compaction of the design doc and verify that it ran.

    Records the pre-compaction revision and fragmentation, returns early
    (result False) when there is nothing to compact, otherwise requests
    compaction over REST and delegates verification to check(). All
    failures are reported through set_exception.
    """
    try:
        self.compaction_revision, self.precompacted_fragmentation = \
            self._get_compaction_details()
        self.log.info("{0}: stats compaction before triggering it: ({1},{2})".
                      format(self.design_doc_name,
                             self.compaction_revision, self.precompacted_fragmentation))
        # Nothing to do when the index is not fragmented at all.
        if self.precompacted_fragmentation == 0:
            self.log.info("%s: There is nothing to compact, fragmentation is 0" %
                          self.design_doc_name)
            self.set_result(False)
            return
        self.rest.ddoc_compaction(self.ddoc_id, self.bucket)
        self.check()
    except (CompactViewFailed, SetViewInfoNotFound) as ex:
        self.result = False
        self.set_exception(ex)
    # catch and set all unexpected exceptions
    except Exception as e:
        self.result = False
        self.set_exception(e)
# verify compaction history incremented and some defraging occurred
def check(self):
try:
_compaction_running = self._is_compacting()
new_compaction_revision, fragmentation = self._get_compaction_details()
self.log.info("{0}: stats compaction:revision and fragmentation: ({1},{2})".
format(self.design_doc_name,
new_compaction_revision, fragmentation))
if new_compaction_revision == self.compaction_revision and _compaction_running :
# compaction ran successfully but compaction was not changed
# perhaps we are still compacting
self.log.info("design doc {0} is compacting".format(self.design_doc_name))
self.check()
elif new_compaction_revision > self.compaction_revision or\
self.precompacted_fragmentation > fragmentation:
self.log.info("{1}: compactor was run, compaction revision was changed on {0}".format(new_compaction_revision,
self.design_doc_name))
frag_val_diff = fragmentation - self.precompacted_fragmentation
self.log.info("%s: fragmentation went from %d to %d" % \
(self.design_doc_name,
self.precompacted_fragmentation, fragmentation))
if frag_val_diff > 0:
# compaction ran successfully but datasize still same
# perhaps we are still compacting
if self._is_compacting():
self.check()
self.log.info("compaction was completed, but fragmentation value {0} is more | |
# src/models/layers.py
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from models import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Print a text message, optionally followed by a Numpy array summary.

    When *array* is given, the message is left-padded and the array's
    shape, min/max values and dtype are appended before printing.
    """
    if array is None:
        print(text)
        return
    pieces = [text.ljust(25)]
    pieces.append("shape: {:20}  ".format(str(array.shape)))
    if array.size:
        pieces.append("min: {:10.5f}  max: {:10.5f}".format(array.min(), array.max()))
    else:
        # Empty array: keep the columns aligned but leave the values blank.
        pieces.append("min: {:10}  max: {:10}".format("", ""))
    pieces.append("  {}".format(array.dtype))
    print("".join(pieces))
class BatchNorm(KL.BatchNormalization):
    """Extends the Keras BatchNormalization class to allow a central place
    to make changes if needed.

    Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via setting in Config class) and functions
    as linear layer.
    """
    def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when making inferences
        """
        # Name the class explicitly: the original `super(self.__class__, self)`
        # resolves dynamically and recurses forever if BatchNorm is subclassed.
        return super(BatchNorm, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    # A callable backbone supplies its own shape computation.
    if callable(config.BACKBONE):
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    height, width = image_shape[0], image_shape[1]
    stage_shapes = []
    for stride in config.BACKBONE_STRIDES:
        stage_shapes.append([int(math.ceil(height / stride)),
                             int(math.ceil(width / stride))])
    return np.array(stage_shapes)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """Residual block whose shortcut is the identity (no conv on it).

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # 1x1 bottleneck conv -> BN -> ReLU
    y = KL.Conv2D(filters1, (1, 1), name=conv_prefix + '2a',
                  use_bias=use_bias)(input_tensor)
    y = BatchNorm(name=bn_prefix + '2a')(y, training=train_bn)
    y = KL.Activation('relu')(y)

    # kxk conv -> BN -> ReLU
    y = KL.Conv2D(filters2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2b')(y, training=train_bn)
    y = KL.Activation('relu')(y)

    # 1x1 expansion conv -> BN, then add the identity shortcut.
    y = KL.Conv2D(filters3, (1, 1), name=conv_prefix + '2c',
                  use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2c')(y, training=train_bn)
    y = KL.Add()([y, input_tensor])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """Residual block with a (strided) conv layer on the shortcut path.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # Main path: strided 1x1 -> kxk -> 1x1 convolutions.
    y = KL.Conv2D(filters1, (1, 1), strides=strides,
                  name=conv_prefix + '2a', use_bias=use_bias)(input_tensor)
    y = BatchNorm(name=bn_prefix + '2a')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    y = KL.Conv2D(filters2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2b')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    y = KL.Conv2D(filters3, (1, 1), name=conv_prefix + '2c',
                  use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2c')(y, training=train_bn)

    # Shortcut path: strided 1x1 conv so shapes match for the addition.
    shortcut = KL.Conv2D(filters3, (1, 1), strides=strides,
                         name=conv_prefix + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_prefix + '1')(shortcut, training=train_bn)

    y = KL.Add()([y, shortcut])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.

    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns the per-stage outputs [C1, C2, C3, C4, C5]; C5 is None when
    stage5 is False.
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1: 7x7 strided conv + 3x3 strided max-pool.
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2: conv block (stride 1 here) followed by two identity blocks.
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4: resnet50 and resnet101 differ only in the block count here.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        # Block labels continue 'b', 'c', ... (chr(98) == 'b').
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5 (optional; skipped e.g. when the head does not need it).
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Applies the given deltas to the given boxes.

    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
    """
    # Corner form -> center/size form.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center and rescale the size.
    cy += deltas[:, 0] * h
    cx += deltas[:, 1] * w
    h *= tf.exp(deltas[:, 2])
    w *= tf.exp(deltas[:, 3])
    # Back to corner form.
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    y2 = y1 + h
    x2 = x1 + w
    return tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clip boxes to lie inside a window.

    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clamp each coordinate into the window's range on its axis.
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    # tf.split loses the static last-dim size; restore it.
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        """Store proposal-selection parameters.

        proposal_count: maximum number of proposals kept after NMS.
        nms_threshold: IoU threshold for non-max suppression.
        config: model config; call() reads RPN_BBOX_STD_DEV and PRE_NMS_LIMIT.
        """
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
ix = tf.nn.top_k(scores, | |
# gh_stars: 0
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0683115,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.256343,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.370276,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.283367,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.490689,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.281424,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.05548,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.223328,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.96977,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0699531,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0102723,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0998003,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0759698,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.169753,
'Execution Unit/Register Files/Runtime Dynamic': 0.0862421,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.259873,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.654669,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.45811,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00142112,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00142112,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00124257,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000483632,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00109131,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00517613,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0134549,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0730317,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.64544,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.218047,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.248049,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.09286,
'Instruction Fetch Unit/Runtime Dynamic': 0.557759,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.040404,
'L2/Runtime Dynamic': 0.0085189,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.37966,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.51035,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.101669,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.101669,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.86172,
'Load Store Unit/Runtime Dynamic': 2.11341,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.250698,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.501396,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0889734,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0895797,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.288837,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0357473,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.601153,
'Memory Management Unit/Runtime Dynamic': 0.125327,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.1276,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.244051,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0174266,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.143547,
'Renaming Unit/Int Front End RAT/Subthreshold | |
"en yaXama ": -2099, " baXinda ": -2100, "ken Xen ": -2101, " Xiklik ": 2102,
"vara aX": -2103, "kdire X": 2104, "n Xutu ": 2105, "n aXild": 2106, "sel ekX": 2107,
"yol Xu ": -2108, "len Xu ": -2109, "m yarIX": 2110, "Xuheyd": 2111, "Xakaci": 2112,
"boXta ": 2113, "ton Xu": -2114, "Xilili": 2115, "Ximsir": 2116, " aXtir": -2117,
"Xarkit": -2118, "rdak X": -2119, "tlaXa ": -2120, "s baX ": -2121, " arXin": 2122,
"rdaXin": -2123, "mUXur": 2124, "ukaXe": 2125, "aXalt": 2126, "ifXa ": 2127,
"nakaX": 2128, "InmeX": 2129, "labaX": 2130, "vebaX": -2131, "keXan": 2132,
"onXim": 2133, "Xirre": 2134, " Xahp": 2135, "Xkemb": 2136, " elaX": -2137,
"didiX": 2138, "trbaX": -2139, "sOGUX": 2140, "hu Xa": 2141, "Xabi ": -2142,
"Xevar": 2143, "Guk X": -2144, "Xehir": 2145, "aXiye": -2146, " irX": 2147,
"hXa ": -2148, "hrIX": -2149, "ferX": -2150, "oXid": -2151, "IsaX": -2152,
"aXoc": 2153, "kteX": -2154, "Xuay": 2155, "Xyen": -2156, "laXy": -2157,
"draX": -2158, "Xs ": -2159, "eiX": -2160, "suX": -2161, " yeni yaXama": -2162,
"In baXinin ": 2163, "ya baXini": -2164, "basa baX": -2165, "tI baXin": -2166, "cI baXin": -2167,
" Xisler": -2168, "Xendil": 2169, "eklanX": 2170, "mastaX": 2171, "etiXin": -2172,
" beleX": 2173, "menekX": 2174, "taXtik": -2175, "g Xam ": -2176, "tabaXi": 2177,
" diXim": 2178, " neXte": 2179, " kalIX": 2180, "SanlIX": -2181, "iXter ": -2182,
" meXe ": 2183, "rkadaX": 2184, "almuX": -2185, "Xampu": 2186, "baXkl": -2187,
" inX ": 2188, " eXe ": 2189, "Xiris": 2190, "Xsoy ": 2191, "dalaX": 2192,
"mUXuz": 2193, "fadoX": 2194, "yabaX": 2195, "traXl": 2196, "keXli": 2197,
"rlIXi": -2198, " narX": -2199, "niXli": 2200, "eXkal": 2201, "aXkil": -2202,
"arXim": 2203, " abaX": -2204, "rgaXa": 2205, "Xaniz": -2206, "aXima": 2207,
"pekiX": 2208, "yahX": 2209, "Xahm": 2210, "Xuby": -2211, "viXn": 2212,
"ldiX": 2213, " CoX": 2214, "kveX": 2215, "amiX": -2216, "Xuso": 2217,
"nbaX": 2218, " nuX": -2219, "spaX": -2220, "Xade": -2221, "nyaX": -2222,
"sIX": -2223, "xaX": -2224, "dan baXina": -2226, "ni baXina": -2227, " akXama ": 2228,
" bu iXe": -2229, "ruk Xen": 2230, "mandaXi": -2231, "yaXayis": 2232, "g yarIX": 2233,
"rek iXe": 2234, "v yarIX": 2235, "layIXin": 2236, "aXile ": 2237, " rakIX": -2238,
" iXhan": 2239, " taXa ": 2240, "aXayin": 2241, " Xenal": 2242, "taXla ": 2243,
"dinleX": -2244, "dUXtur": -2245, "ankuX": 2246, "yalIX": -2247, "eXite": -2248,
"Xukut": -2249, "n aX ": -2250, "Xenka": 2251, " Xar ": 2252, " Xisk": 2253,
"pIXir": 2254, "ahaXk": 2255, "UlXen": 2256, "UkerX": 2257, "Xinim": -2258,
"Xartr": -2259, "ayXen": 2260, "OvaX": 2261, "amUX": -2262, "Xanv": 2263,
"uXas": -2264, "Xces": 2265, "Xiad": -2266, "muaX": -2267, " IXa": -2268,
"laXk": -2269, "amuX": -2270, "Xubu": -2271, "bIX": -2272, "ir yanlIX": 2273,
"iz baXin": -2274, "k ve Xu ": -2275, "n beXte ": 2276, "ran kurX": -2277, " Xahan ": 2278,
" niXani": 2279, "Xenel ": 2280, "Xamata": 2281, "baXabi": -2282, "Xovmen": 2283,
"marXla": 2284, "iXtiga": 2285, " Xadan": 2286, "la iXe": 2287, "Xalvar": 2288,
" mayIX": -2289, "kateX": 2290, "estaX": 2291, " aXko": 2292, "OkkeX": 2293,
"baXel": -2294, "faXil": -2295, "aykuX": 2296, "kunuX": 2297, " Xanz": 2298,
"panIX": 2299, "natIX": -2300, "artiX": -2301, "iXime": 2302, "Xevv": 2303,
"Xzam": 2304, " saX": -2305, "hdaX": -2306, "ovaX": -2307, "nzeX": 2308,
"Xad ": -2309, "aSIX": -2310, "diXb": 2311, "bUXr": 2312, "tmIX": 2313,
"vXir": 2314, "iyaX": -2315, "Xic ": -2316, "aIX": -2317, "mXa": -2318,
"waX": -2319, "e baXindan": 2320, "lIk yarIX": 2321, "rak baXin": -2322, "man aXim": 2323,
"bir Xu ": -2324, "et paXa": 2325, "t yarIX": 2326, "l yarIX": 2327, "baXark": -2328,
"Ir neX": 2329, "titreX": 2330, " seviX": 2331, "iniXin": 2332, "leXeme": 2333,
" yaGIX": 2334, "uXkin ": 2335, "Xahak": 2336, "hibaX": -2337, "Xifal": 2338,
"Xakay": 2339, "Xiraz": 2340, "dabaX": 2341, "deXik": 2342, " Xang": 2343,
"pIXil": 2344, "leXeb": 2345, " neXe": 2346, "iriX ": 2347, "Xarab": 2348,
"Xenay": 2349, " uluX": -2350, "jerX": -2351, "IdaX": -2352, "hSiX": 2353,
"InIX": 2354, "laXu": -2355, "teXr": 2356, "Xehp": -2357, "meXk": -2359,
"paXt": -2360, "hmuX": -2361, "boXl": 2362, "Xeo": -2363, "ciX": -2364,
"an baXini": -2365, "ve yarIX": 2366, " baXina": -2367, "ra baXin": -2368, " Xanlik": -2369,
"Xarkint": -2370, "e aXilm": 2371, "an Xen ": 2372, "rk baXi": -2373, "beXin ": -2374,
" akXit": 2375, "UkeniX": 2376, " kaXiy": 2377, "konuX ": 2378, "birleX": 2379,
"rIXil": 2380, "mpraX": -2381, "Xortl": 2382, " fiXi": 2383, " itiX": 2384,
" Xips": 2385, "Xemas": 2386, "eXanl": 2387, "metiX": -2388, "dolaX": 2389,
"danIX": 2390, "Xural": 2391, "OkXi": 2392, "kaXg": 2393, "ShaX": 2394,
"Xeft": 2395, "kbeX": 2396, "hhaX": -2397, "Xcas": 2398, "beXm": -2399,
"Xso": -2400, "da baXinda ": -2401, " el Xar": 2402, "steriX ": 2403, "vanda X": -2404,
"Xerbet": 2405, " Xentu": 2406, "Xahidi": 2407, "Xtirah": -2408, " biXey": 2409,
" aXisi": 2410, "Xmeke": 2411, "kopuX": 2412, "Xeyid": -2413, "eXbas": 2414,
"Xuray": 2415, "tokuX": 2416, "laXak": -2417, "iflaX": -2418, "miXim": 2419,
"ohoX": -2420, "koXo": -2421, "aXaf": -2422, "IXak": -2423, " UXe": 2424,
" Xia": 2425, "Xubj": -2426, "uXog": 2427, "Xunc": 2428, "Xmir": 2429,
"UXf": 2430, "UmX": -2431, "me yarIX": 2432, "ener Xen": 2433, "Xoyledir": 2434,
"y OlCUX": 2435, "raXid ": 2436, "ambaXi": -2437, " Xunun": 2438, "laXali": 2439,
" Xahla": 2440, "Xanar ": 2441, "menteX": 2442, "CarXi ": 2443, "nakkaX": 2444,
" Xule ": 2445, "marXin": 2446, "ziliX": 2447, "mekeX": 2448, " Xike": 2449,
"baXci": -2450, " Xemd": 2451, " duX ": 2452, "o Xu ": -2453, " Xamd": 2454,
"Xampa": 2455, "igarX": 2456, "Xtiya": -2457, "kaCIX": 2458, "OtuX": 2459,
"mUXs": 2460, "IvaX": -2461, "steriXl": 2462, "beXliyo": -2463, " aliXa": 2464,
" bi Xi": 2465, "Xirkec": -2466, " kaXin": 2467, "etiXi ": -2468, "bahXis": 2469,
" aXtim": -2470, "GIXtan": -2471, "Xamar ": 2472, " eliX ": 2473, "soruX ": 2474,
"Xardan": 2475, "leXere": 2476, "diXil": 2477, "taXde": 2478, "Xerhi": 2479,
"Xakas": 2480, "beXim": -2481, "tiXam": 2482, " Xarl": 2483, "aXkid": -2484,
" kuXm": -2485, "Xoven": 2486, "eXint": -2487, "iXm ": -2488, "Xgut": -2489,
"Xiog": 2490, "ofiX": -2491, "rak yarIX": 2492, "Xeyhan ": -2493, " tariX": 2494,
"Xanta ": -2495, "iniXi ": 2496, " turXu": 2497, " arXiz": -2498, " aXir ": -2499,
"kansaX": -2500, " diXe": 2501, "UXtem": -2502, "kesiX": 2503, "Xkolu": 2504,
"diliX": 2505, "Xkek": 2506, " UXu": 2507, "priX": -2508, "Xayd": -2509,
"dX": -2510, "en Xen ": 2511, "bir Xut": 2512, "Xeydao": -2513, "malIXi": -2514,
"maraXl": 2515, "Xantiy": 2516, "Xarman": 2517, " iXiy": 2518, "rakuX": 2519,
" iXe": 2520, "Xniko": 2521, "Xseld": -2522, "Xifa ": 2523, "obaX": -2524,
"ekuX": 2525, "nhaX": -2526, "fzIX": -2527, "IXip": 2528, " aXilan ": -2529,
"nim iXi": 2530, "Xerafe": 2531, "Xahade": 2532, "peSkeX": 2533, " aXird": -2534,
"muXum": 2535, "rmiXs": 2536, "ulaX ": 2537, "beXbe": -2538, "bOlUX": 2539,
"belaX": -2540, "Xken ": 2541, "idaX ": -2542, " naXi": -2543, "Xeym": -2544,
" IXt": -2545, "OpUX": 2546, "Xanc": -2547, " Xilini ": 2548, "uk yarIX": 2549,
"Xokunu": 2550, " diXin": 2551, " Xinas": 2552, "madaX": -2553, "iniXe": 2554,
"yeXil": 2555, " kaX ": 2556, "urXit": 2557, "baXac": -2558, "ksaX": -2559,
"mbUX": 2560, "oXim": 2561, "coXa": 2562, "abuX": -2563, "Xokl": 2564,
"Xh ": -2565, "Xt ": -2566, "nataXa": 2567, " kuXtu": -2568, "Xaramp": 2569,
"k aXma": -2570, " Xelal": 2571, " aXiyo": 2572, "ekiliX": 2573, " Xizo": 2574,
"klIXi": -2575, " kliX": 2576, "abaX ": 2577, " bolX": 2578, "baaX": -2579,
" kUX": -2580, "uXsa": 2581, " CeX": 2582, " Xile ": 2583, " aXans": -2584,
" Xabl": 2585, "aXaye": -2586, "Xark ": 2587, "poXet": 2588, " aXig": 2589,
" iXpo": 2590, "leniX": 2591, "aXme": 2592, "eraX": -2593, " Xc": -2594,
" eXiyor": -2595, "Xoyleyd": 2596, "Xensoy": 2597, " kaXir": -2598, " manX ": 2599,
" ediX": 2600, "raXit": 2601, "UXsa": 2602, " eXo": 2603, "fuX": -2604,
"OneliX": 2605, "emiXs": 2606, "tisaX": -2607, "baXip": -2608, " taXt": 2609,
"leXir": 2610, "CekiX": 2611, "menXe": 2612, "aXkt": 2613, "eXbe": 2614,
"Xahe": 2615, " Xoka ": 2616, "SantaX": 2617, " aXirl": -2618, " | |
# gh_stars: 1-10
import sys
import numpy as np
from sira.modelling.component_graph import ComponentGraph
from sira.modelling.iodict import IODict
from sira.modelling.structural import Base, Element
np.set_printoptions(threshold=sys.maxsize)
class InfrastructureFactory(object):
    """Factory that instantiates the correct Infrastructure subclass
    from a configuration dict."""

    @staticmethod
    def create_model(config):
        """
        Build an infrastructure model from `config`.

        :param config: dict with at least a 'system_class' key; the whole
            dict is forwarded to the model constructor as keyword args.
        :return: an instance of the matching Infrastructure subclass.
        :raises ValueError: if 'system_class' is not recognised.
        """
        system_class = config['system_class'].lower()
        if system_class == 'substation':
            return Substation(**config)
        elif system_class == 'powerstation':
            return PowerStation(**config)
        elif system_class == 'potablewatertreatmentplant':
            return PotableWaterTreatmentPlant(**config)
        elif system_class == 'potablewaterpumpstation':
            return PotableWaterPumpStation(**config)
        elif system_class == 'modelteststructure':
            return ModelTestStructure(**config)
        elif system_class in ('railnetwork', 'rail_network'):
            return RailNetwork(**config)
        # Previously an unknown system_class fell through and returned None,
        # which only surfaced later as an obscure AttributeError.
        raise ValueError(
            "Unrecognised system_class: {!r}".format(config['system_class']))
class Infrastructure(Base):
    """
    The top level representation of a system that can respond to a
    range of hazards. It encapsulates a number of components that
    are used to estimate the response to various hazard levels.
    """
    # Declarative schema elements (see sira.modelling.structural.Element).
    supply_nodes = Element(
        'dict',
        'The components that supply the infrastructure system',
        dict)
    output_nodes = Element(
        'dict',
        'The components that output from the infrastructure system',
        dict)
    # supply_total = None
    # Graph built from `components`; rebuilt in __init__.
    _component_graph = None
    # Cached total output capacity of the undamaged system (see
    # get_nominal_output).
    if_nominal_output = None
    system_class = None
    # Cached system damage-state labels (see get_system_damage_states).
    sys_dmg_states = None
    def __init__(self, **kwargs):
        """
        Construct the infrastructure object.

        :param kwargs: Objects making up the infrastructure; forwarded to
            the Base constructor, which is expected to populate attributes
            such as `components`.
        """
        super(Infrastructure, self).__init__(**kwargs)
        # Guarantee a components container even if the config supplied none.
        if not getattr(self, "components", None):
            self.components = IODict()
        # Build the connectivity graph used for max-flow calculations.
        self._component_graph = ComponentGraph(self.components)
    def calc_output_loss(self, scenario, component_damage_state_ind):
        """
        Calculate the results to the infrastructure given the damage state
        parameter.

        :param scenario: Details of the scenario being run
        :param component_damage_state_ind: The array of the component's
            damage state samples, shape (num_samples, num_components)
        :return: 4 arrays: per-component loss, per-component functionality,
            per-output-node output, and per-sample economic loss
        """
        # Component loss caused by the damage
        if_level_loss = np.zeros(
            (scenario.num_samples, len(self.components)), dtype=np.float64)
        # Infrastructure loss: sum of component loss
        if_level_economic_loss = np.zeros(
            scenario.num_samples, dtype=np.float64)
        # Component functionality
        if_level_functionality = np.zeros(
            (scenario.num_samples, len(self.components)), dtype=np.float64)
        # output for the level of damage
        if_level_output = np.zeros(
            (scenario.num_samples, len(self.output_nodes)), dtype=np.float64)
        # ********************
        # NOT YET IMPLEMENTED:
        # output available as recovery progresses
        # if_output_given_recovery = \
        #     np.zeros((scenario.num_samples, scenario.num_time_steps),
        #              dtype=np.float64)
        # iterate through the samples
        for sample_index in range(scenario.num_samples):
            # initialise the function and loss arrays for the sample
            # component_function_at_time = []
            comp_sample_loss = np.zeros(len(self.components))
            comp_sample_func = np.zeros(len(self.components))
            # Extract the array of damage states for this sample
            component_ds = component_damage_state_ind[sample_index, :]
            # iterate through the components; sorted key order must match the
            # column order used when the damage-state array was built
            for component_index, comp_key in \
                    enumerate(sorted(self.components.keys())):
                component = self.components[comp_key]
                # get the damage state for the component
                damage_state \
                    = component.get_damage_state(component_ds[component_index])
                # use the damage state attributes to calculate the loss and
                # functionality for the component sample
                loss = damage_state.damage_ratio * component.cost_fraction
                comp_sample_loss[component_index] = loss
                comp_sample_func[component_index] = damage_state.functionality
            # save this sample's component loss and functionality
            if_level_loss[sample_index, :] = comp_sample_loss
            if_level_functionality[sample_index, :] = comp_sample_func
            # the infrastructure's economic loss for this sample is the sum
            # of all component losses
            if_level_economic_loss[sample_index] = np.sum(comp_sample_loss)
            # estimate the output for this sample's component functionality
            if_level_output[sample_index, :] = \
                self.compute_output_given_ds(comp_sample_func)
        return if_level_loss, if_level_functionality, \
            if_level_output, if_level_economic_loss
def get_nominal_output(self):
"""
Estimate the output of the undamaged infrastructure output
nodes.
:return: Numeric value of aggregated output.
"""
if not self.if_nominal_output:
self.if_nominal_output = 0
# `output_nodes` is a dict
for output_comp in list(self.output_nodes.values()): # noqa: E1101
self.if_nominal_output += output_comp['output_node_capacity']
return self.if_nominal_output
    def compute_output_given_ds(self, comp_level_func):
        """
        Using the graph of components, the output is calculated
        from the component functionality parameter.

        :param comp_level_func: An array that indicates the functionality
            level for each component.
        :return: An array of the output level for each output node.
        """
        # Create the component graph if one does not yet exist
        # if not self._component_graph:
        #     self._component_graph = ComponentGraph(
        #         self.components, comp_level_func)
        # else:
        #     self._component_graph.update_capacity(
        #         self.components, comp_level_func)
        self._component_graph.update_capacity(
            self.components, comp_level_func)
        # calculate the capacity
        system_outflows_sample = np.zeros(len(self.output_nodes))
        for output_index, (output_comp_id, output_comp) in \
                enumerate(self.output_nodes.items()):  # noqa: E1101
            # track the outputs by source type (e.g. water or coal)
            total_supply_flow_by_source = {}
            for (supply_comp_id, supply_comp) in \
                    self.supply_nodes.items():  # noqa: E1101
                # Max-flow fraction from this supply node to the output node
                # given current (damaged) component capacities.
                if_flow_fraction = self._component_graph.maxflow(
                    supply_comp_id, output_comp_id
                )
                if_sample_flow = if_flow_fraction * \
                    supply_comp['capacity_fraction']
                # Accumulate flow per commodity type.
                if supply_comp['commodity_type'] not in \
                        total_supply_flow_by_source:
                    total_supply_flow_by_source[supply_comp['commodity_type']] \
                        = if_sample_flow
                else:
                    total_supply_flow_by_source[supply_comp['commodity_type']] \
                        += if_sample_flow
            # The scarcest commodity is the bottleneck for this output node.
            # NOTE(review): min() raises ValueError if supply_nodes is empty
            # — presumably a valid model always has supply nodes; confirm.
            total_available_flow = min(total_supply_flow_by_source.values())
            estimated_capacity_fraction \
                = min(total_available_flow, output_comp['capacity_fraction'])
            system_outflows_sample[output_index] \
                = estimated_capacity_fraction * self.get_nominal_output()
        return system_outflows_sample
# pylint: disable=fixme
# TODO: FIX `calc_recov_time_given_comp_ds`
# def calc_recov_time_given_comp_ds(self, component, damage_state, scenario):
# '''
# Calculates the recovery time of a component, given damage state index
# '''
# import scipy.stats as stats
# recovery_parameters = component.get_recovery(damage_state)
# damage_parameters = component.get_damage_state(damage_state)
#
# m = recovery_parameters.recovery_param1 # loc = mean
# s = recovery_parameters.recovery_param2 # scale = standard deviation
# fn = damage_parameters.functionality
# cdf = stats.norm.cdf(scenario.restoration_time_range, loc=m, scale=s)
# return cdf + (1.0 - cdf) * fn
    def calc_response(self,
                      component_loss,
                      comp_sample_func,
                      component_damage_state_ind):
        """
        Convert the arrays into dicts for subsequent analysis.

        :param component_loss: The array of component loss values
        :param comp_sample_func: The array of component functionality values
        :param component_damage_state_ind: The array of component damage state
            indicators
        :return: 4 dicts: per-component stats, per-component-type stats,
            per-component-class damage-level percentages, and the expected
            (modal) damage index per component class
        """
        component_list_sorted = np.sort(list(self.components.keys()))
        num_samples = np.shape(component_loss)[0]
        comp_resp_dict = dict()
        comptype_resp_dict = dict()
        # ---------------------------------------------------------------
        # sys_ds_levels = self.get_system_damage_states()
        comp_ds_levels = []
        component_damage_state_array = np.array(component_damage_state_ind)
        comp_damage_state_ind_binned = dict()
        # Collate response of individual components:
        for comp_index, comp_id in enumerate(component_list_sorted):
            component = self.components[comp_id]
            comp_ds_levels = component.damage_states
            comp_damage_state_ind_binned[comp_id] \
                = np.bincount(component_damage_state_array[:, comp_index],
                              minlength=len(comp_ds_levels))
            comp_resp_dict[(comp_id, 'loss_mean')] \
                = np.mean(component_loss[:, comp_index])
            comp_resp_dict[(comp_id, 'loss_std')] \
                = np.std(component_loss[:, comp_index])
            comp_resp_dict[(comp_id, 'func_mean')] \
                = np.mean(comp_sample_func[:, comp_index])
            comp_resp_dict[(comp_id, 'func_std')] \
                = np.std(comp_sample_func[:, comp_index])
            # 'num_failures' = fraction of samples in the worst damage state.
            comp_resp_dict[(comp_id, 'num_failures')] \
                = np.mean(component_damage_state_ind[:, comp_index]
                          >= (len(component.damage_states) - 1))  # noqa:W503
        # ---------------------------------------------------------------
        # Collate aggregate response of component grouped by their TYPE:
        for ct_id in self.get_component_types():
            comps_of_a_type = sorted(list(self.get_components_for_type(ct_id)))
            ct_pos_index = [list(component_list_sorted).index(x)
                            for x in comps_of_a_type]
            comptype_resp_dict[(ct_id, 'loss_mean')] \
                = np.mean(component_loss[:, ct_pos_index])
            comptype_resp_dict[(ct_id, 'loss_std')] \
                = np.std(component_loss[:, ct_pos_index])
            comptype_resp_dict[(ct_id, 'loss_tot')] \
                = np.sum(component_loss[:, ct_pos_index]) / float(num_samples)
            comptype_resp_dict[(ct_id, 'func_mean')] \
                = np.mean(comp_sample_func[:, ct_pos_index])
            comptype_resp_dict[(ct_id, 'func_std')] \
                = np.std(comp_sample_func[:, ct_pos_index])
            # Damage-state count is taken from the first component of the
            # type — assumes all components of a type share the same number
            # of damage states (TODO confirm).
            acomponent = self.components[comps_of_a_type[0]]
            comptype_resp_dict[(ct_id, 'num_failures')] \
                = np.mean(component_damage_state_ind[:, ct_pos_index]
                          >= (len(acomponent.damage_states) - 1))  # noqa:W503
        # ---------------------------------------------------------------
        # comp_dmg_state_array_exp = \
        #     np.around(component_damage_state_array.mean(0))
        # comp_dmg_state_array_std = component_damage_state_array.std(0)
        rows, _ = np.shape(component_damage_state_array)
        comp_cls_dmg_index_binned = dict()
        comp_cls_dmg_index_expected = dict()
        comp_cls_dmg_level_percentages = dict()
        # NOTE(review): comp_ds_levels below still holds the damage states of
        # the LAST component from the loop above — this implicitly assumes
        # every component has the same number of damage states; confirm.
        for cls_id in self.get_component_classes():
            comps_of_a_cls = \
                sorted(list(self.get_components_for_class(cls_id)))
            comp_cls_pos_index = \
                [list(component_list_sorted).index(x) for x in comps_of_a_cls]
            comp_cls_dmg_indices_tmp = \
                component_damage_state_array[:, comp_cls_pos_index]
            # Iterate over number of samples
            tmparr = np.zeros(shape=(rows, len(comp_ds_levels)))
            for i in range(rows):
                tmparr[i] = np.bincount(comp_cls_dmg_indices_tmp[i],
                                        minlength=len(comp_ds_levels))
            # Percentage of this class's components in each damage level.
            comp_cls_dmg_level_percentages_matrix \
                = 100 * (tmparr / float(len(comps_of_a_cls)))
            comp_cls_dmg_level_percentages[(cls_id, 'mean')] \
                = comp_cls_dmg_level_percentages_matrix.mean(0)
            comp_cls_dmg_level_percentages[(cls_id, 'std')] \
                = comp_cls_dmg_level_percentages_matrix.std(0)
            comp_cls_dmg_index_binned[cls_id] = tmparr.mean(0)
            tmp = comp_cls_dmg_index_binned[cls_id]
            # Modal damage index; ties resolved towards the higher index.
            comp_cls_dmg_index_expected[cls_id] \
                = np.max(np.where(tmp == np.max(tmp)))
        return comp_resp_dict,\
            comptype_resp_dict,\
            comp_cls_dmg_level_percentages,\
            comp_cls_dmg_index_expected
def get_component_types(self):
"""
Convenience method to get the list of components that are
costed.
:return: list of costed component types
"""
uncosted_comptypes = {'CONN_NODE', 'SYSTEM_INPUT', 'SYSTEM_OUTPUT'}
component_types = set()
for component in list(self.components.values()):
if component.component_type not in uncosted_comptypes:
component_types.add(component.component_type)
return list(component_types)
def get_components_for_type(self, component_type):
"""
Return a list of components for the passed component type.
:param component_type: A string representing a component type
:return: List of components with the matching component type.
"""
for component in list(self.components.values()):
if component.component_type == component_type:
yield component.component_id
def get_component_classes(self):
"""
Convenience method to get the list of components that belong to the
same `component_class`.
:return: list of costed component classes
"""
uncosted_compclasses = [
'JUNCTION', 'JUNCTION POINT', 'JUNCTION NODE',
'MODEL ARTEFACT',
'SYSTEM INPUT',
'SYSTEM OUTPUT',
'Generator']
component_classes = set()
for component in list(self.components.values()):
if component.component_class not in uncosted_compclasses:
component_classes.add(component.component_class)
return list(component_classes)
def get_components_for_class(self, component_class):
"""
Return a list of components for the passed component class.
:param component_class: A string representing a component class
:return: List of components with the matching component class.
"""
for component in list(self.components.values()):
if component.component_class == component_class:
yield component.component_id
    def get_system_damage_states(self):
        """
        Return a list of the damage state labels.

        :return: List of strings detailing the system damage levels.
        """
        # TODO: THIS IS A HACK. NEED A BETTER SOLUTION!
        # Uses the first component's damage-state names as a proxy for the
        # system-level states — assumes all components share the same
        # damage-state labels (TODO confirm).
        one_comp_obj = list(self.components.values())[0]
        self.sys_dmg_states = [one_comp_obj.damage_states[ds].damage_state_name
                               for ds in one_comp_obj.damage_states]
        return self.sys_dmg_states
    def get_dmg_scale_bounds(self):
        """
        An array of damage scale boundaries.

        :return: Array of real numbers representing damage state boundaries
        """
        # todo introduce system subclass to infrastructure
        # Hard-coded fractional thresholds separating the damage states.
        return [0.01, 0.05, 0.40, 0.70, 1.00]
def get_component_class_list(self):
"""
Return the list of component classes from the components.
Not sure | |
<filename>orig_main.py
import datetime
import time
import json
import os
import yaml
import threading
from fastapi import FastAPI, Request, Query
from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from starlette.responses import RedirectResponse
from typing import Optional, List # noqa
# from weatherman import *
from weatherman import weather_butler
from weatherman import data_validator
from bin import setup_weatherman
from weatherman import sql_butler
from bin import logger
# Application name used for log files and the logger itself.
appname = 'weatherman'
# Path of the main YAML config, relative to the working directory.
master_config_path = 'etc/weatherman.yml'
# Logging
"""
I init the logger so i can use it for the api calls.
Because i dont want it to log to the main log i use a startup log that should forever be empy
"""
# NOTE: this rebinds the imported `logger` module to a Logger instance;
# the module is not accessible under that name afterwards.
logger = logger.Logger(appname, app_name_in_file=True, log_suffix='startup')
logit = logger.return_logit()
default_log_file = logger.log_file
logit.debug('logging the this thingy here')
class WeatherMan:
    """
    Historic weather data is hard to comeby. There is Weatherunderground but it would just
    be easier to gather the data and generate our own reports.
    I gather my weather data from https://openweathermap.org.
    """
    def __init__(self):
        """
        Build the application object: read env vars, load the YAML config,
        configure logging, create the weather butler and the database.

        Raises ValueError if the TESTING or AUTOPOLLING env vars are not
        one of 'true'/'false' (case-insensitive).
        """
        # Variables
        self.environment = None
        self.testing = None
        # NOTE(review): `autopolling` appears to be a leftover — the rest of
        # the class uses the `auto_polling` property instead.
        self.autopolling = None
        self.config = None
        self._state = None
        self._setup = None
        self._auto_polling = None
        self._dump_list = None
        self._report = None
        self._last_poll = None
        self._timing_intervul = None
        self._db_name = None
        # ENV
        self.environment = os.environ.get('ENVIRONMENT')
        # NOTE(review): .upper() raises AttributeError (not the intended
        # ValueError) when TESTING/AUTOPOLLING are missing entirely.
        if os.environ.get('TESTING').upper() == 'TRUE':
            self.testing = True
        elif os.environ.get('TESTING').upper() == 'FALSE':
            self.testing = False
        else:
            raise ValueError('TESTING variable not set')
        if os.environ.get('AUTOPOLLING').upper() == 'TRUE':
            self.auto_polling = True
        elif os.environ.get('AUTOPOLLING').upper() == 'FALSE':
            self.auto_polling = False
        else:
            raise ValueError('AUTOPOLLING variable not set')
        # Config
        self.load_config()
        self.setup = False
        # Logging
        self.set_logging()
        logger.update_file_level(
            self.config['environments'][self.environment]['log_parameters']['f_level'])
        logger.update_consol_level(
            self.config['environments'][self.environment]['log_parameters']['c_level'])
        # Variables
        try:
            self.weather_butler = weather_butler.WeatherButler(
                self.config['private_config_path'],
                self.config['owma_url'],
                self.config['key_path']
            )
            self.config['locations'] = self.weather_butler.config['locations']
            self.update_state(self.config['locations'])
        except FileNotFoundError:
            # Private config missing: flag that first-time setup is needed.
            self.setup = True
        self.db_name = self.config['db_name'] + \
            self.config['environments'][self.environment]['db_addition']
        self.db = sql_butler.SQLButler(self.db_name)
        self.db.create_database()
        self.update_state({'env': self.environment})
        self.update_state({'db_name': self.db_name})
        self.update_state({'setup_needed': self.setup})
        # NOTE(review): 'timing_intervul' is set to self.setup here — looks
        # like a copy/paste slip (probably meant self.timing_intervul);
        # confirm before relying on this state key.
        self.update_state({'timing_intervul': self.setup})
        self.update_state({'testing': self.testing})
        # NOTE(review): duplicate of the 'db_name' update above.
        self.update_state({'db_name': self.db_name})
        self.update_state({'working_directory': os.getcwd()})
        # Wrapping up
        logit.info(f"Starting in {self.environment}")
        logit.info(f"logging levels set to fh:{self.state['f_level']} \
            ch:{self.state['c_level']} testing:{self.testing}")
        logit.debug(f'State: {self.state}')
# Getters/Setters
# Setup
@property
def setup(self):
if self._setup is None:
return False
return self._setup
@setup.setter
def setup(self, value):
self._setup = value
self.update_state({'setup': self.setup})
# Auto Polling
@property
def auto_polling(self):
if self._auto_polling is None:
return False
return self._auto_polling
@auto_polling.setter
def auto_polling(self, value):
self._auto_polling = value
self.update_state({'auto_polling': self.auto_polling})
# Timing Intervul
@property
def timing_intervul(self):
if self._timing_intervul is None:
return self.set_timing_intervul()
return self._timing_intervul
@timing_intervul.setter
def timing_intervul(self, value):
self._timing_intervul = value
self.update_state({'timing_intervul': self.timing_intervul})
# Last Poll
@property
def last_poll(self):
if self._last_poll is None:
return datetime.datetime.strftime(
self.db.get_first_and_last()[-1],
self.config['datetime_str'])
return self._last_poll
@last_poll.setter
def last_poll(self, value):
self._last_poll = value
self.update_state({'last_poll': self.last_poll})
# DB Name
@property
def db_name(self):
if self._db_name is None:
return self.config['db_name']
return self._db_name
@db_name.setter
def db_name(self, value):
self._db_name = value
self.update_state({'db_name': self.db_name})
# Dump List
@property
def dump_list(self):
if self._dump_list is None:
return []
return self._dump_list
def append_dump_list(self, values):
if not isinstance(values, list):
values = [values]
if self._dump_list is None:
self._dump_list = []
for value in values:
self._dump_list.append(value)
def clear_dump_list(self):
self._dump_list = None
# Report
@property
def report(self):
if self._report is None:
return {}
return self._report
def update_report(self, dct):
if self._report is None:
self._report = {}
for key, value in dct.items():
self._report[key] = value
def clear_report(self):
self._report = None
# State
@property
def state(self):
if self._state is None:
return {}
return self._state
def update_state(self, dct):
if self._state is None:
self._state = {}
for key, value in dct.items():
self._state[key] = value
# Config
def load_config(self, path=master_config_path):
with open(path) as ycf:
self.config = yaml.load(ycf, Loader=yaml.FullLoader)
try:
self.config['locations'] = self.weather_butler.config['locations']
self.update_state({'locations': self.config['locations']})
except:
pass
return self.config
    def set_logging(self, logging_dct=None):
        """
        Set or reset the logging parameters.

        :param logging_dct: logging parameters; defaults to the current
            environment's 'log_parameters' config section. NOTE: the dict is
            mutated in place ('None' strings become None), which also
            mutates the config when the default is used.
        """
        if logging_dct is None:
            logging_dct = self.config['environments'][self.environment]['log_parameters']
        # YAML values of the literal string 'None' mean "not set".
        for k, v in logging_dct.items():
            if v == 'None':
                logging_dct[k] = None
        logger.update_file(
            appname,
            f_level=logging_dct['f_level'],
            c_level=logging_dct['c_level'],
            log_rolling=logging_dct['log_rolling'],
            maxBytes=logging_dct['maxBytes'],
            backupCount=logging_dct['backupCount'],
            log_directory=logging_dct['log_directory'],
            log_prefix=logging_dct['log_prefix'],
            log_suffix=logging_dct['log_suffix'],
            app_name_in_file=logging_dct['app_name_in_file'],
            date_in_file=logging_dct['date_in_file'],
            time_in_file=logging_dct['time_in_file'],
            utc_in_file=logging_dct['utc_in_file'],
            short_datetime=logging_dct['short_datetime']
        )
        if logger.state['log_rolling'] is not None:
            logger.add_rotation()
        # Mirror the effective logging settings into the app state.
        self.update_state({
            'f_level': logging_dct['f_level'],
            'c_level': logging_dct['c_level'],
            'log_rolling': logging_dct['log_rolling'],
            'maxBytes': logging_dct['maxBytes'],
            'backupCount': logging_dct['backupCount'],
            'log_file': logger.log_file,
        })
    def poll_weather(self):
        """
        Poll the weather website (OpenWeatherMap) via the weather butler.

        Side effect: records the poll time (UTC) on ``self.last_poll``.

        :return: the weather data returned by the butler.
        """
        data = self.weather_butler.poll()
        logit.debug(f"request: {self.weather_butler.request}")
        logit.debug(f"request: {self.weather_butler.request.json()}")
        self.last_poll = datetime.datetime.now(tz=datetime.timezone.utc)
        return data
    def manage_polling(self):
        """
        Poll the weather API and persist the results to the database in
        one step.
        """
        logit.debug('managing polling?')
        data = self.poll_weather()
        self.db.multi_add(data)
        logit.debug('data added to db')
    def weather_dump(self, parameters):
        """
        Query the database with `parameters` and collect the rows, reduced
        to the fields listed in config['dump_webpage_list'], onto the
        dump list.

        :param parameters: filter parameters passed to the DB query.
        :return: the accumulated dump list.
        """
        logit.debug(f'weather dump based on parameters {parameters}')
        data = self.db.query_database(parameters)
        for weatherdata in data:
            # Start with every wanted field present (None when missing).
            new_dct = {i: None for i in self.config['dump_webpage_list']}
            dct = dict(weatherdata)
            for i, j in dct.items():
                if i == 'time':
                    # NOTE(review): only dct[i] is reformatted; new_dct still
                    # receives the original value `j` below — confirm whether
                    # the formatted time was intended to propagate.
                    dct[i] = datetime.datetime.strftime(j, self.config['datetime_str'])
                if i == 'name':
                    # Map the numeric city id back to its configured name.
                    for x, y in self.config['locations'].items():
                        if int(dct['city']) == int(y):
                            j = x
                            break
                if i in self.config['dump_webpage_list']:
                    new_dct[i] = j
            self.append_dump_list(new_dct)
        return self.dump_list
    def weather_report(self, data):
        """
        Take a list of dumped weather data and save off the important bits
        for a report: group rows by city, split them into "storms" by time
        gaps, then keep the first/last rows of each storm plus any
        non-duplicate intermediate readings.

        :param data: list of weather-row dicts (as built by weather_dump).
        :return: dict mapping city name -> list of storm event lists.
        """
        def recursive_search(value, key, index, lst, itterations):
            # Is this a duplicate weather response?
            # Looks back at most `itterations` entries before `index` for an
            # equal value under `key`.
            dex = 0
            for thingy, el in enumerate(reversed(lst[:index])):
                if value == el[key]:
                    return True
                dex += 1
                if dex >= itterations:
                    return False
            return False
        report1 = {}
        report2 = {}
        for name, city in self.config['locations'].items():
            # create dict with the city names as the keys to keep all weather data together
            report1[name] = []
            for line in data:
                if line['name'] == name:
                    report1[name].append(line)
        for name, reports in report1.items():
            """
            Itterates through the new lists of weather data and
            places similar times together in to storms
            """
            report2[name] = [[]]
            report_index = 0
            for index, line in enumerate(reports):
                if index == 0:
                    report2[name][0].append(line)
                else:
                    # Start a new storm when the gap to the previous reading
                    # reaches the configured threshold (minutes).
                    if reports[index - 1]['time'] <= line['time'] - \
                            datetime.timedelta(
                                minutes=int(
                                    self.config['storm_difference_time']
                                )):
                        report2[name].append([])
                        report_index += 1
                    report2[name][report_index].append(line)
        report = {}
        for name, reports in report2.items():
            # Adds the first and last element into the final storm list
            # This is where i will add extra data such as notable weather fluxuations
            report[name] = []
            for index, event in enumerate(reports):
                if len(event) == 0:
                    continue
                elif len(event) == 1:
                    report[name].append(event)
                else:
                    report[name].append([event[0], event[-1]])
                    # memory = {
                    #     'sky_id': []
                    # }
                    # NOTE(review): `report[name][index]` assumes no earlier
                    # event was skipped as empty; a skipped event would shift
                    # positions and index the wrong storm — confirm.
                    for itterate, line in enumerate(event):
                        if itterate in [0, len(event) - 1]:
                            continue
                        # Keep intermediate readings whose sky_id was not
                        # seen in the recent look-back window.
                        if not recursive_search(
                            line['sky_id'],
                            'sky_id',
                            itterate,
                            event,
                            self.config['storm_difference_itteration']
                        ):
                            report[name][index].insert(-1, line)
        logit.debug('Created a weather report')
        return report
def write_report(self, report, file_name=None):
"""
Takes the list of data, updates the datetime objects to strings and saves to a file.
"""
json_report = {}
for name, storms in report.items():
json_report[name] = []
for storm in storms:
if len(storm) > 1:
storm_durration = str(storm[-1]['time'] - storm[0]['time'])
# new_start = storm[0]['time'].strftime("%Y-%m-%dT%H:%M:%SZ")
# new_end = storm[-1]['time'].strftime("%Y-%m-%dT%H:%M:%SZ")
else:
if self.config['single_storm_event_flag']:
"""
If the single_storm_event_flag is true the single event storms
will be added. else they will be skipped.
"""
storm_durration = '0'
# new_start = storm[0]['time'].strftime("%Y-%m-%dT%H:%M:%SZ")
# new_end = storm[0]['time'].strftime("%Y-%m-%dT%H:%M:%SZ")
else:
continue
for line in storm:
if not isinstance(line['time'], str):
line['time'] = datetime.datetime.strftime(
line['time'],
self.config['datetime_str'])
entry = {
'storm_start': storm[0]['time'],
'storm_end': storm[-1]['time'],
'storm_durration': storm_durration,
'start_dct': storm[0],
'end_dct': storm[-1],
'storm_events': storm,
}
json_report[name].append(entry)
if file_name is None:
file_name = self.config['weather_reports_dir'] + \
'Weather_report_' + \
datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%dT%H:%M:%SZ') + \
'.json'
self.update_report(json_report)
with open(file_name, 'w') as new_report:
json.dump(self.report, new_report, indent=2)
logit.debug(f'Wrote a weatehr report to a file {file_name}')
def clear_search(self):
self.clear_dump_list()
self.clear_report()
def set_timing_intervul(self, intervul='auto'):
"""
Set the amount of time between data pulls.
If it is auto, it will calculate the minimum number of minutes
between polls and take the largest value
between that and the minimum poll timer in the config.
"""
try:
if intervul == 'auto':
calls_made = len(self.state['cities'].keys())
month_minute_converion = 60 * 24 * self.config['estemated_days_per_month']
minimum_intervul = month_minute_converion / \
self.config['max_calls_per_month'] * calls_made
intervul = minimum_intervul
self.timing_intervul = max(intervul, self.config['minimum_poll_time'])
self.state['timing_intervul'] = self.timing_intervul
except:
self.timing_intervul = self.config['minimum_poll_time']
self.auto_polling = False
self.state['auto_polling'] = self.auto_polling
return self.timing_intervul
"""
Spin up the app using fastapp and uvicorn. See the docker-compose file for whats
actually run
"""
app = FastAPI() # noqa
global WM
WM = WeatherMan()
validator = data_validator.DataValidator()
SW = setup_weatherman.SetupWeatherman()
templates = Jinja2Templates(directory="templates/")
app.mount("/static", StaticFiles(directory="static"), name="static")
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
def return_state():
# | |
pred) in enumerate(zip(LFC_mats, self.predict())):
resids = LFC_data - pred
CL_noise_var = np.nanvar(resids, axis = 0)
CL_tot_var = np.nanvar(LFC_data, axis = 0)
CL_R2 = 1 - CL_noise_var / CL_tot_var
self.CL_R2_df = pd.concat([self.CL_R2_df,
pd.DataFrame({'CCLE_ID': LFC_data.columns.values,
'batch_id': np.ones_like(CL_R2)*batch_id,
'R2': CL_R2})])
hp_noise_var = np.nanvar(resids, axis = 1)
hp_tot_var = np.nanvar(LFC_data, axis = 1)
hp_R2 = 1 - hp_noise_var / hp_tot_var
self.hp_R2_df = pd.concat([self.hp_R2_df,
pd.DataFrame({'hp_seq': LFC_data.index.values,
'batch_id': np.ones_like(hp_R2)*batch_id,
'R2': hp_R2})])
def init_slopes(self, LFC_mats):
'''Get initial estimates of CL slopes by regressing each CL's data on within-batch avg'''
lm = LinearRegression(fit_intercept=True)
#first get overall slope adjustment per batch
if len(LFC_mats) > 0:
common_hps = reduce(np.intersect1d, [LFC_mat.index.values for LFC_mat in LFC_mats])
else:
common_hps = LFC_mats[0].index.values
if len(common_hps) > 100:
per_batch_avgs = np.ones((len(LFC_mats), len(common_hps)))
for ii, LFC_mat in enumerate(LFC_mats):
cur_d = LFC_mat.ix[np.in1d(LFC_mat.index.values, common_hps),:]
cur_d = cur_d.reset_index().drop_duplicates(subset='index', keep='last').set_index('index')
per_batch_avgs[ii,:] = np.nanmean(cur_d.ix[common_hps,:].values, axis = 1)
ov_avg = np.nanmean(per_batch_avgs, axis = 0)
batch_slopes = np.ones(per_batch_avgs.shape[0])
for ii in range(per_batch_avgs.shape[0]):
uset = np.where(~np.isnan(per_batch_avgs[ii,:]))[0]
lm.fit(ov_avg.reshape(-1,1)[uset,:], per_batch_avgs[ii,uset].transpose())
batch_slopes[ii] = lm.coef_
else:
batch_slopes = np.array([np.nanstd(LFC_mat.values) for LFC_mat in LFC_mats])
batch_slopes = batch_slopes / np.nanmean(batch_slopes)
CL_slopes = np.ones(self.n_CL_batches)
CL_offset = 0
for batch_ii, LFC_mat in enumerate(LFC_mats):
avg_hp = np.nanmean(LFC_mat, axis = 1)
for ii in np.arange(LFC_mat.shape[1]):
uvals = np.where(~np.isnan(LFC_mat.values[:,ii]))[0]
lm.fit(avg_hp.reshape(-1,1)[uvals,:], LFC_mat.values[uvals,ii])
CL_slopes[ii + CL_offset] = lm.coef_ * batch_slopes[batch_ii]
CL_offset += LFC_mat.shape[1]
_=self.sess.run(self.CL_slope.assign(CL_slopes.reshape(-1,1)))
    def save(self, results_dir, save_perf_only = False, edward = False):
        '''
        Write parameter matrices to text files. Also serialize other model
        params to json file at specified path.

        :param results_dir: output directory (created if missing).
        :param save_perf_only: when True, only fit/performance summaries are
            written, not the per-CL / per-hairpin / per-gene matrices.
        :param edward: when True, write posterior means/SDs from the edward
            variational fit instead of the point estimates.
        '''
        if not os.path.exists(results_dir):
            os.makedirs(results_dir)
        if not edward:
            other_df = {}
            other_df['reg_params'] = self.reg_params
            other_df['R2_vals'] = self.R2_vals
            other_df['optim_params'] = self.optim_params
            other_df['loss_evals'] = self.loss_evals
            other_df['SSMD'] = self.SSMD
            with open(os.path.join(results_dir, 'other_info.json'), 'w') as f:
                # presumably json_tricks.dump (supports `primitives`) —
                # confirm against the file's imports.
                dump(other_df, f, primitives = True, allow_nan = True)
        if not save_perf_only: #if not just saving performance params
            CL_df = pd.DataFrame({
                'CCLE_ID': self.data_names['CLs'],
                'gene_slope': self.gene_slope.eval(session = self.sess).flatten()
                # 'noise_vars': self.CL_noise_vars.eval(session = self.sess).flatten()
                })
            CL_df.to_csv(os.path.join(results_dir, 'CL_data.csv'), index = False)
            CL_batch_df = pd.DataFrame({'CCLE_ID': self.all_CL_names,
                            'CL_slope': self.CL_slope.eval(session = self.sess).flatten(),
                            'CL_offset': self.CL_offset.eval(session = self.sess).flatten(),
                            'CL_batch': self.all_CL_batches,
                            'noise_vars': self.CL_noise_vars.eval(session = self.sess).flatten()})
            # NOTE(review): the R2 computation stores `CL_R2_df`, so
            # hasattr(self, 'CL_R2') appears to never be True — confirm.
            if hasattr(self, 'CL_R2'):
                CL_batch_df['R2'] = self.CL_R2_df['R2']
            if edward:
                CL_batch_df['offset_mean'] = self.q_CL_offset.loc.eval().flatten()
                CL_batch_df['offset_sd'] = self.q_CL_offset.scale.eval().flatten()
            CL_batch_df.to_csv(os.path.join(results_dir, 'CL_batch_data.csv'), index = False)
            hp_df = pd.DataFrame({
                'hp': self.data_names['hps'],
                'unpred_offset': self.hairpin_unpred.eval(session = self.sess).flatten(),
                'Geff': self.guide_Geff.eval(session = self.sess).flatten(),
                'Seff': self.guide_Seff.eval(session = self.sess).flatten()
                })
            if edward:
                hp_df['unpred_offset_mean'] = self.q_hairpin_unpred.loc.eval()
                hp_df['unpred_offset_sd'] = self.q_hairpin_unpred.scale.eval()
            hp_df.to_csv(os.path.join(results_dir, 'hp_data.csv'), index = False)
            hp_batch_df = pd.DataFrame({
                'hp': self.all_hp_seqs,
                'hp_batch': self.all_hp_batches,
                'hairpin_offset': self.hairpin_offset.eval(session = self.sess).flatten()
                })
            # NOTE(review): same attribute-name mismatch as above
            # ('hp_R2' vs 'hp_R2_df') — confirm.
            if hasattr(self, 'hp_R2'):
                hp_batch_df['R2'] = self.hp_R2_df['R2']
            if edward:
                hp_batch_df['hairpin_offset_mean'] = self.q_hairpin_offset.loc.eval()
                hp_batch_df['hairpin_offset_sd'] = self.q_hairpin_offset.scale.eval()
            hp_batch_df.to_csv(os.path.join(results_dir, 'hp_batch_data.csv'), index = False)
            per_gene_df = pd.DataFrame({'avg': self.gene_score_avgs.eval(session = self.sess).flatten()},
                                       index = self.data_names['genes'])
            if edward:
                # Posterior gene scores: per-CL deviation plus gene average.
                gene_mean_df = pd.DataFrame((self.q_gene_score.loc.eval() + \
                                             self.q_gene_score_avgs.loc.eval()).transpose(),
                                            index = self.data_names['genes'], columns = self.data_names['CLs'])
                gene_sd_df = pd.DataFrame(np.sqrt(self.q_gene_score.scale.eval()**2 + \
                                                  self.q_gene_score_avgs.scale.eval()**2).transpose(),
                                          index = self.data_names['genes'], columns = self.data_names['CLs'])
                # Mask genes with too few usable hairpins.
                gene_mean_df = gene_mean_df.where(self.n_used_hairpins_per_gene >= self.min_hairpins_per, other = np.nan)
                gene_sd_df = gene_sd_df.where(self.n_used_hairpins_per_gene >= self.min_hairpins_per, other = np.nan)
                gene_mean_df.to_csv(os.path.join(results_dir, 'gene_means.csv'))
                gene_sd_df.to_csv(os.path.join(results_dir, 'gene_SDs.csv'))
                per_gene_df['SD'] = self.q_gene_score_avgs.scale.eval().flatten()
            else:
                gene_df = pd.DataFrame((self.gene_score.eval(session = self.sess) + \
                                        self.gene_score_avgs.eval(session = self.sess)).transpose(),
                                       index = self.data_names['genes'], columns = self.data_names['CLs'])
                gene_df = gene_df.where(self.n_used_hairpins_per_gene >= self.min_hairpins_per, other = np.nan)
                gene_df.to_csv(os.path.join(results_dir, 'gene_data.csv'))
            per_gene_df.to_csv(os.path.join(results_dir, 'per_gene_data.csv'))
            if edward:
                seed_mean_df = pd.DataFrame((self.q_seed_score.loc.eval() + \
                                             self.q_seed_score_avgs.loc.eval()).transpose(),
                                            index = self.data_names['seeds'], columns = self.data_names['CLs'])
                seed_sd_df = pd.DataFrame(np.sqrt(self.q_seed_score.scale.eval()**2 + \
                                                  self.q_seed_score_avgs.scale.eval()**2).transpose(),
                                          index = self.data_names['seeds'], columns = self.data_names['CLs'])
                seed_mean_df = seed_mean_df.where(self.n_used_hairpins_per_seed >= self.min_hairpins_per, other = np.nan)
                seed_sd_df = seed_sd_df.where(self.n_used_hairpins_per_seed >= self.min_hairpins_per, other = np.nan)
                seed_mean_df.to_csv(os.path.join(results_dir, 'seed_means.csv'))
                seed_sd_df.to_csv(os.path.join(results_dir, 'seed_SDs.csv'))
            else:
                seed_df = pd.DataFrame((self.seed_score.eval(session = self.sess) + \
                                        self.seed_score_avgs.eval(session = self.sess)).transpose(),
                                       index = self.data_names['seeds'], columns = self.data_names['CLs'])
                seed_df = seed_df.where(self.n_used_hairpins_per_seed >= self.min_hairpins_per, other = np.nan)
                seed_df.to_csv(os.path.join(results_dir, 'seed_data.csv'))
def make_edward_model(self, LFC_mats, gene_matrix, seed_matrix, data_names):
    '''Create a Bayesian model in edward, using current parameter estimates to initialize.

    Builds zero-mean Normal priors whose scales are derived from the L2
    regularization strengths (scale = sqrt(1/lambda), so the MAP of this model
    matches the penalized point estimate), freezes the slope/efficacy
    parameters at their current fitted values, defines one masked Normal
    observation distribution per dataset, and sets up mean-field Gaussian
    variational posteriors plus a KLqp inference object in `self.inference`.
    '''
    # define priors on parameters
    gene_score = Normal(loc=tf.zeros([self.n_CLs, self.n_genes], dtype = self.float),
                        scale = np.sqrt(1.0/self.reg_params['rel_gene_l2_lambda']) * tf.ones([self.n_CLs, self.n_genes], dtype = self.float))
    seed_score = Normal(loc=tf.zeros([self.n_CLs, self.n_seeds], dtype = self.float),
                        scale = np.sqrt(1.0/self.reg_params['rel_seed_l2_lambda']) * tf.ones([self.n_CLs, self.n_seeds], dtype = self.float))
    gene_score_avgs = Normal(loc=tf.zeros([1, self.n_genes], dtype = self.float),
                             scale = np.sqrt(1.0/self.reg_params['gene_l2_lambda']) * tf.ones([1, self.n_genes], dtype = self.float))
    seed_score_avgs = Normal(loc=tf.zeros([1, self.n_seeds], dtype = self.float),
                             scale = np.sqrt(1.0/self.reg_params['seed_l2_lambda']) * tf.ones([1, self.n_seeds], dtype = self.float))
    hairpin_offset = Normal(loc=tf.zeros([self.n_hp_batches, 1], dtype = self.float),
                            scale = np.sqrt(1.0/self.reg_params['hairpin_l2_lambda']) * tf.ones([self.n_hp_batches, 1], dtype = self.float))
    hairpin_unpred = Normal(loc=tf.zeros([self.n_hairpins, 1], dtype = self.float),
                            scale = np.sqrt(1.0/self.reg_params['hp_unpred_l2_lambda']) * tf.ones([self.n_hairpins, 1], dtype = self.float))
    CL_offset = Normal(loc=tf.zeros([self.n_CL_batches, 1], dtype = self.float),
                       scale = np.sqrt(1.0/self.reg_params['CL_l2_lambda']) * tf.ones([self.n_CL_batches, 1], dtype = self.float))
    # parameters we dont try to fit here: frozen at their current point
    # estimates pulled out of the existing TF session
    CL_slope = tf.constant(self.CL_slope.eval(session = self.sess))
    gene_slope = tf.constant(self.gene_slope.eval(session = self.sess))
    # region_weights = tf.constant(self.region_weights.eval(session = self.sess))
    guide_Geff = tf.constant(self.guide_Geff.eval(session = self.sess))
    guide_Seff = tf.constant(self.guide_Seff.eval(session = self.sess))
    # per-dataset sparse maps from hairpins to their target genes / seed families
    gene_maps = [self.make_sparse_submap(gene_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
    seed_maps = [self.make_sparse_submap(seed_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
    # noise scale per CL-batch, parameterized through log-variance so the
    # underlying tf.Variable is unconstrained
    self.noise_sigma = tf.sqrt(tf.exp(tf.Variable(np.log(self.CL_noise_vars.eval(session = self.sess)), dtype = self.float)))
    mod_params = {
        'CL_offset': CL_offset,
        'CL_slope': CL_slope,
        'guide_Seff': guide_Seff,
        'guide_Geff': guide_Geff,
        'hairpin_offset': hairpin_offset,
        'hairpin_unpred': hairpin_unpred,
        'gene_slope': gene_slope,
        'seed_score_avgs': seed_score_avgs,
        'seed_score': seed_score,
        'gene_score_avgs': gene_score_avgs,
        'gene_score': gene_score
    }
    # per-dataset masks of observed (non-NaN) LFC entries
    bool_masks = [tf.logical_not(tf.is_nan(LFC_mat.values)) for LFC_mat in LFC_mats]
    y_list = []
    CL_cnt = 0
    hp_cnt = 0
    # one observation distribution per dataset, restricted to observed entries
    # NOTE(review): assumes len(self.obs) == len(LFC_mats) — confirm upstream
    for ii in range(len(self.obs)):
        cur_pred = self.get_dataset_pred(
            mod_params,
            gene_maps[ii],
            seed_maps[ii],
            LFC_mats[ii].index.values,
            LFC_mats[ii].columns.values,
            hp_cnt,
            CL_cnt)
        cur_CL_inds = np.arange(LFC_mats[ii].shape[1]) + CL_cnt
        hp_cnt += LFC_mats[ii].shape[0]
        CL_cnt += LFC_mats[ii].shape[1]
        cur_sigma = tf.transpose(tf.gather(self.noise_sigma, cur_CL_inds)) * tf.ones_like(cur_pred)
        y_list.append(Normal(loc=tf.boolean_mask(cur_pred, bool_masks[ii]),
                             scale = tf.boolean_mask(cur_sigma, bool_masks[ii])))
    # NaN-free copies of the data (NaNs replaced by 0)
    LFC_mats_no_na = []
    for LFC_mat in LFC_mats:
        cur = LFC_mat.values.copy()
        cur[np.isnan(cur)] = 0
        LFC_mats_no_na.append(cur)
    # obs_list = [tf.constant(LFC_mat, dtype = 'float') for LFC_mat in LFC_mats_no_na]
    obs_list = [tf.placeholder(self.float, dset.shape) for dset in LFC_mats_no_na]
    # posterior approximating distributions (fully factorized gaussian),
    # with means initialized at the current point estimates and softplus
    # keeping the scales positive
    self.q_gene_score = Normal(loc=tf.Variable(self.gene_score.eval(session = self.sess), dtype = self.float),
                               scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_CLs, self.n_genes], dtype = self.float))))
    self.q_seed_score = Normal(loc=tf.Variable(self.seed_score.eval(session = self.sess), dtype = self.float),
                               scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_CLs, self.n_seeds], dtype = self.float))))
    self.q_gene_score_avgs = Normal(loc=tf.Variable(self.gene_score_avgs.eval(session = self.sess), dtype = self.float),
                                    scale=tf.nn.softplus(tf.Variable(tf.random_normal([1, self.n_genes], dtype = self.float))))
    self.q_seed_score_avgs = Normal(loc=tf.Variable(self.seed_score_avgs.eval(session = self.sess), dtype = self.float),
                                    scale=tf.nn.softplus(tf.Variable(tf.random_normal([1, self.n_seeds], dtype = self.float))))
    self.q_hairpin_offset = Normal(loc=tf.Variable(self.hairpin_offset.eval(session = self.sess), dtype = self.float),
                                   scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_hp_batches, 1], dtype = self.float))))
    self.q_hairpin_unpred = Normal(loc=tf.Variable(self.hairpin_unpred.eval(session = self.sess), dtype = self.float),
                                   scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_hairpins, 1], dtype = self.float))))
    self.q_CL_offset = Normal(loc=tf.Variable(self.CL_offset.eval(session = self.sess), dtype = self.float),
                              scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_CL_batches, 1], dtype = self.float))))
    # bind each observation RV to the masked placeholder values
    data_dict = {i: tf.boolean_mask(d, m) for i, d, m in zip(y_list, obs_list, bool_masks)}
    # NOTE(review): the placeholders are fed the raw LFC_mats (which still
    # contain NaNs) rather than LFC_mats_no_na; boolean_mask drops the NaN
    # positions so values there should not matter, but LFC_mats_no_na looks
    # like it was intended here — confirm.
    for i, d in zip(obs_list, LFC_mats):
        data_dict.update({i: d})
    self.inference = ed.KLqp({gene_score: self.q_gene_score,
                              seed_score: self.q_seed_score,
                              gene_score_avgs: self.q_gene_score_avgs,
                              seed_score_avgs: self.q_seed_score_avgs,
                              hairpin_offset: self.q_hairpin_offset,
                              hairpin_unpred: self.q_hairpin_unpred,
                              CL_offset: self.q_CL_offset},
                             data=data_dict)
    self.inference.initialize()
    tf.global_variables_initializer().run()
def run_edward_inference(self, n_iter = 1000, print_freq = 100):
    """Run `n_iter` variational-inference updates on the edward model.

    Logs DeltaG_R2 — the fraction of variance in the point-estimate gene
    scores explained by the current variational posterior mean — at the start
    and every `print_freq` iterations, and returns the per-iteration losses.
    """
    baseline_GS = self.gene_score.eval(session = self.sess).flatten()
    baseline_var = np.var(baseline_GS)

    def current_r2():
        # R^2 of the posterior-mean gene scores against the point estimates
        posterior_mean = self.q_gene_score.mean().eval().flatten()
        return 1 - np.var(baseline_GS - posterior_mean) / baseline_var

    loss_evals = np.zeros(n_iter)
    self.write('Init DeltaG_R2: {}'.format(current_r2()))
    for it in range(n_iter):
        loss_evals[it] = self.inference.update()['loss']
        if it % print_freq == 0:
            self.write('It: {}, DeltaG_R2: {}, Loss: {}'.format(it, current_r2(), loss_evals[it]))
    return loss_evals
## prediction
def get_dataset_pred(self, mod_params, gene_map, seed_map, cur_hp_seqs, cur_CL_names, hp_offset, CL_offset, just_avg_scores = False):
    """Build the predicted LFC tensor (hairpins x cell lines) for one dataset.

    Combines gene and seed knockdown effects (scaled by per-guide efficacies
    and per-CL slopes) with per-hairpin unpredicted effects, then applies
    batch-level CL slopes/offsets and hairpin offsets.  With
    `just_avg_scores`, only the across-CL average gene/seed scores are used.
    """
    hp_inds = np.array([self.hp_ind_map[seq] for seq in cur_hp_seqs])
    CL_inds = np.array([self.CL_ind_map[name] for name in cur_CL_names])
    # batch-level index ranges for this dataset's columns / rows
    CL_batch_inds = CL_offset + np.arange(len(cur_CL_names))
    hp_batch_inds = hp_offset + np.arange(len(cur_hp_seqs))

    # pick the score matrices: either avg-only (broadcast over CLs) or full
    if just_avg_scores:
        gene_mat = mod_params['gene_score_avgs'] + tf.zeros_like(mod_params['gene_score'], dtype = self.float)
        seed_mat = mod_params['seed_score_avgs'] + tf.zeros_like(mod_params['seed_score'], dtype = self.float)
    else:
        gene_mat = mod_params['gene_score'] + mod_params['gene_score_avgs']
        seed_mat = mod_params['seed_score'] + mod_params['seed_score_avgs']

    gene_effect = map_effects(gene_mat, CL_inds, gene_map)
    gene_effect = tf.multiply(gene_effect, tf.gather(mod_params['guide_Geff'], hp_inds))
    gene_effect = tf.multiply(gene_effect,
                              tf.reshape(tf.gather(mod_params['gene_slope'], CL_inds), [1, -1]))

    seed_effect = map_effects(seed_mat, CL_inds, seed_map)
    seed_effect = tf.multiply(seed_effect, tf.gather(mod_params['guide_Seff'], hp_inds))

    # total KD effect of each hp, scaled by the CL-batch slope
    KD_effect = tf.gather(mod_params['hairpin_unpred'], hp_inds) + gene_effect + seed_effect
    KD_effect = tf.multiply(KD_effect,
                            tf.reshape(tf.gather(mod_params['CL_slope'], CL_batch_inds), [1, -1]))

    # add offset terms
    pred = KD_effect \
        + tf.reshape(tf.gather(mod_params['CL_offset'], CL_batch_inds), [1, -1]) \
        + tf.gather(mod_params['hairpin_offset'], hp_batch_inds)
    return pred
def get_dataset_LL(self, mod_params, LFC_mat, preds, cur_eval_mask, CL_offset):
CL_batch_range = CL_offset + np.arange(LFC_mat.get_shape().as_list()[1])
cur_nLL = 0.5 * tf.reduce_sum(
tf.boolean_mask(
tf.multiply(tf.pow(preds - LFC_mat, 2),
1/tf.reshape(tf.gather(mod_params['CL_noise_vars'], CL_batch_range), [1, -1])),
cur_eval_mask
))
cur_SS = 0.5 * tf.reduce_sum(
| |
nao = mol.nao
lmax = pcmobj.lmax
nlm = (lmax+1)**2
atom_coords = mol.atom_coords()
atom_charges = mol.atom_charges()
grids = pcmobj.grids
extern_point_idx = ui > 0
cav_coords = (atom_coords.reshape(natm,1,3)
+ numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
cav_coords = cav_coords[extern_point_idx]
int3c2e = mol._add_suffix('int3c2e')
cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e)
fakemol = gto.fakemol_for_charges(cav_coords)
v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
nao_pair = v_nj.shape[0]
v_phi = numpy.zeros((natm, ngrid_1sph, nao, nao))
v_phi[extern_point_idx] += v_nj.transpose(2,0,1)
phi = numpy.einsum('n,xn,jn,jnpq->jxpq', weights_1sph, ylm_1sph, ui, v_phi)
Xvec = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi.reshape(natm*nlm,-1))
Xvec = Xvec.reshape(natm,nlm,nao,nao)
ao = mol.eval_gto('GTOval', grids.coords)
aow = numpy.einsum('gi,g->gi', ao, grids.weights)
aopair = numpy.einsum('gi,gj->gij', ao, aow)
psi = numpy.zeros((natm, nlm, nao, nao))
i1 = 0
for ia in range(natm):
fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
i0, i1 = i1, i1 + fak_pol[0].shape[1]
p1 = 0
for l in range(lmax+1):
fac = 4*numpy.pi/(l*2+1)
p0, p1 = p1, p1 + (l*2+1)
psi[ia,p0:p1] = -fac * numpy.einsum('mn,nij->mij', fak_pol[l], aopair[i0:i1])
B = lib.einsum('nlpq,nlrs->pqrs', psi, Xvec)
B = B + B.transpose(2,3,0,1)
return B
def make_B1(pcmobj, r_vdw, ui, ylm_1sph, cached_pol, L):
    '''1st order.

    Nuclear-coordinate derivative of the ddCOSMO B tensor.  Recomputes the
    zeroth-order intermediates (phi0, Xvec0, psi0), then their first-order
    responses (ui1 via fi1, phi1, psi1, L1), assembles the three derivative
    contributions and symmetrizes.  Returns an array of shape
    (natm, 3, nao, nao, nao, nao).
    '''
    mol = pcmobj.mol
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    mol = pcmobj.mol  # NOTE(review): redundant — already assigned above
    natm = mol.natm
    nao = mol.nao
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2

    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    grids = pcmobj.grids

    # cavity surface points: per-atom Lebedev sphere scaled by vdW radius,
    # keeping only points outside all other atoms' switching regions (ui > 0)
    extern_point_idx = ui > 0
    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
    cav_coords = cav_coords[extern_point_idx]
    int3c2e = mol._add_suffix('int3c2e')
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas,
                                         mol._env, int3c2e)
    fakemol = gto.fakemol_for_charges(cav_coords)
    # electrostatic potential integrals at the exposed cavity points
    v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
    nao_pair = v_nj.shape[0]
    v_phi = numpy.zeros((natm, ngrid_1sph, nao, nao))
    v_phi[extern_point_idx] += v_nj.transpose(2,0,1)
    # zeroth-order phi and solution X of L X = phi
    phi0 = numpy.einsum('n,xn,jn,jnpq->jxpq', weights_1sph, ylm_1sph, ui, v_phi)
    Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0.reshape(natm*nlm,-1))
    Xvec0 = Xvec0.reshape(natm,nlm,nao,nao)

    # zeroth-order psi from the fake multipoles on the DFT grid
    ao = mol.eval_gto('GTOval', grids.coords)
    aow = numpy.einsum('gi,g->gi', ao, grids.weights)
    aopair = numpy.einsum('gi,gj->gij', ao, aow)
    psi0 = numpy.zeros((natm, nlm, nao, nao))
    i1 = 0
    for ia in range(natm):
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        i0, i1 = i1, i1 + fak_pol[0].shape[1]
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            psi0[ia,p0:p1] = -fac * numpy.einsum('mn,nij->mij', fak_pol[l], aopair[i0:i1])

    # first-order response of the switching function: ui1 = -fi1, with the
    # derivative zeroed at fully buried points (ui == 0)
    fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
    fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
    fi1[:,:,ui==0] = 0
    ui1 = -fi1

    # first-order phi: ui-derivative term plus bra/ket AO-derivative terms
    phi1 = numpy.zeros(ui1.shape[:3] + (nlm,nao,nao))
    int3c2e = mol._add_suffix('int3c2e')
    int3c2e_ip1 = mol._add_suffix('int3c2e_ip1')
    aoslices = mol.aoslice_by_atom()
    for ia in range(natm):
        cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
        #fakemol = gto.fakemol_for_charges(cav_coords[ui[ia]>0])
        fakemol = gto.fakemol_for_charges(cav_coords)
        v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1')
        phi1[:,:,ia] += lib.einsum('n,ln,azn,ijn->azlij', weights_1sph, ylm_1sph, ui1[:,:,ia], v_nj)
        v_e1_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e_ip1, comp=3, aosym='s1')
        v_e2_nj = v_e1_nj + v_e1_nj.transpose(0,2,1,3)  # symmetrized bra+ket derivative
        # moving cavity points with atom ia vs. moving the basis on atom ja
        phi1[ia,:,ia] += lib.einsum('n,ln,n,xijn->xlij', weights_1sph, ylm_1sph, ui[ia], v_e2_nj)
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            v = numpy.einsum('n,ln,n,xijn->xlij', weights_1sph, ylm_1sph, ui[ia], v_e1_nj[:,p0:p1])
            phi1[ja,:,ia,:,p0:p1,:] -= v
            phi1[ja,:,ia,:,:,p0:p1] -= v.transpose(0,1,3,2)

    # first-order psi: grid-response (weight1) plus AO-derivative pieces
    psi1 = numpy.zeros((natm,3,natm,nlm,nao,nao))
    i1 = 0
    for ia, (coords, weight, weight1) in enumerate(rks_grad.grids_response_cc(grids)):
        i0, i1 = i1, i1 + weight.size
        ao = mol.eval_gto('GTOval_sph_deriv1', coords)
        aow = numpy.einsum('gi,g->gi', ao[0], weight)
        aopair1 = lib.einsum('xgi,gj->xgij', ao[1:], aow)
        aow = numpy.einsum('gi,zxg->zxgi', ao[0], weight1)
        aopair0 = lib.einsum('zxgi,gj->zxgij', aow, ao[0])

        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            psi1[:,:,ia,p0:p1] -= fac*numpy.einsum('mn,zxnij->zxmij', fak_pol[l], aopair0)
            vtmp = fac*numpy.einsum('mn,xnij->xmij', fak_pol[l], aopair1)
            psi1[ia,:,ia,p0:p1] -= vtmp
            psi1[ia,:,ia,p0:p1] -= vtmp.transpose(0,1,3,2)
            for ja in range(natm):
                shl0, shl1, q0, q1 = aoslices[ja]
                psi1[ja,:,ia,p0:p1,q0:q1,:] += vtmp[:,:,q0:q1]
                psi1[ja,:,ia,p0:p1,:,q0:q1] += vtmp[:,:,q0:q1].transpose(0,1,3,2)

    # assemble: psi1*X0 + psi0*X1 - (L^-T psi0) L1 X0, then symmetrize the
    # two AO-pair index groups
    L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
    Xvec1 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi1.transpose(2,3,0,1,4,5).reshape(natm*nlm,-1))
    Xvec1 = Xvec1.reshape(natm,nlm,natm,3,nao,nao).transpose(2,3,0,1,4,5)
    LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0.reshape(natm*nlm,-1))
    LS0 = LS0.reshape(natm,nlm,nao,nao)

    B = lib.einsum('ixnlpq,nlrs->ixpqrs', psi1, Xvec0)
    B+= lib.einsum('nlpq,ixnlrs->ixpqrs', psi0, Xvec1)
    B-= lib.einsum('ilpq,aziljm,jmrs->azpqrs', LS0, L1, Xvec0)
    B = B + B.transpose(0,1,4,5,2,3)
    return B
def B1_dot_x(pcmobj, dm, r_vdw, ui, ylm_1sph, cached_pol, L):
    """Contract the first-order ddCOSMO B tensor with a density matrix.

    Computes Bx[i, x, p, q] = sum_{rs} B1[i, x, p, q, r, s] * dm[r, s]
    without ever forming the 6-index B1 (cf. make_B1 above); returns an
    array of shape (natm, 3, nao, nao).
    """
    mol = pcmobj.mol
    mol = pcmobj.mol  # NOTE(review): redundant — already assigned above
    natm = mol.natm
    nao = mol.nao
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2

    # NOTE(review): dms / is_single_dm / n_dm are computed but never used —
    # the code below assumes a single 2D dm; confirm whether multi-dm input
    # was intended to be supported.
    dms = numpy.asarray(dm)
    is_single_dm = dms.ndim == 2
    dms = dms.reshape(-1,nao,nao)
    n_dm = dms.shape[0]

    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    aoslices = mol.aoslice_by_atom()
    grids = pcmobj.grids

    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    extern_point_idx = ui > 0

    # first-order response of the switching function (ui1 = -fi1, zeroed at
    # buried points)
    fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
    fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
    fi1[:,:,ui==0] = 0
    ui1 = -fi1

    Bx = numpy.zeros((natm,3,nao,nao))

    # zeroth-order psi contracted with dm -> per-atom multipole vector psi0
    ao = mol.eval_gto('GTOval', grids.coords)
    aow = numpy.einsum('gi,g->gi', ao, grids.weights)
    aopair = numpy.einsum('gi,gj->gij', ao, aow)
    den = numpy.einsum('gij,ij->g', aopair, dm)
    psi0 = numpy.zeros((natm, nlm))
    i1 = 0
    for ia in range(natm):
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
        i0, i1 = i1, i1 + fac_pol.shape[1]
        psi0[ia] = -numpy.einsum('mn,n->m', fac_pol, den[i0:i1])
    # adjoint solution: L^T LS0 = psi0
    LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0.ravel())
    LS0 = LS0.reshape(natm,nlm)

    # phi0 and its nuclear derivative, accumulating the corresponding Bx terms
    phi0 = numpy.zeros((natm,nlm))
    phi1 = numpy.zeros((natm,3,natm,nlm))
    int3c2e = mol._add_suffix('int3c2e')
    int3c2e_ip1 = mol._add_suffix('int3c2e_ip1')
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e)
    cintopt_ip1 = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e_ip1)
    for ia in range(natm):
        cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
        #fakemol = gto.fakemol_for_charges(cav_coords[ui[ia]>0])
        fakemol = gto.fakemol_for_charges(cav_coords)
        v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
        v_phi = numpy.einsum('pqg,pq->g', v_nj, dm)
        phi0[ia] = numpy.einsum('n,ln,n,n->l', weights_1sph, ylm_1sph, ui[ia], v_phi)
        # ui-derivative contribution
        phi1[:,:,ia] += lib.einsum('n,ln,azn,n->azl', weights_1sph, ylm_1sph, ui1[:,:,ia], v_phi)
        Bx += lib.einsum('l,n,ln,azn,ijn->azij', LS0[ia], weights_1sph, ylm_1sph, ui1[:,:,ia], v_nj)
        wtmp = lib.einsum('n,ln,n->ln', weights_1sph, ylm_1sph, ui[ia])
        # AO-derivative (bra) integrals; translational invariance splits the
        # contribution between moving atom ia and moving each basis atom ja
        v_e1_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e_ip1, comp=3,
                                   aosym='s1', cintopt=cintopt_ip1)
        vtmp = lib.einsum('l,ln,xijn->xij', LS0[ia], wtmp, v_e1_nj)
        Bx[ia] += vtmp
        Bx[ia] += vtmp.transpose(0,2,1)
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            Bx[ja,:,p0:p1,:] -= vtmp[:,p0:p1]
            Bx[ja,:,:,p0:p1] -= vtmp[:,p0:p1].transpose(0,2,1)
            tmp = numpy.einsum('xijn,ij->xn', v_e1_nj[:,p0:p1], dm[p0:p1])
            tmp += numpy.einsum('xijn,ji->xn', v_e1_nj[:,p0:p1], dm[:,p0:p1])
            phitmp = numpy.einsum('ln,xn->xl', wtmp, tmp)
            phi1[ja,:,ia] -= phitmp
            phi1[ia,:,ia] += phitmp

    # solve L X0 = phi0, then the first-order X: L X1 = phi1 - L1 X0
    Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0.ravel())
    Xvec0 = Xvec0.reshape(natm,nlm)
    L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
    phi1 -= lib.einsum('aziljm,jm->azil', L1, Xvec0)
    Xvec1 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi1.reshape(-1,natm*nlm).T)
    Xvec1 = Xvec1.T.reshape(natm,3,natm,nlm)

    # psi derivatives over the response of the DFT grid, with Bx terms from
    # X1-weighted densities and X0-weighted AO-pair derivatives
    psi1 = numpy.zeros((natm,3,natm,nlm))
    i1 = 0
    for ia, (coords, weight, weight1) in enumerate(rks_grad.grids_response_cc(grids)):
        i0, i1 = i1, i1 + weight.size
        ao = mol.eval_gto('GTOval_sph_deriv1', coords)
        aow = numpy.einsum('gi,g->gi', ao[0], weight)
        aopair1 = lib.einsum('xgi,gj->xgij', ao[1:], aow)
        aow = numpy.einsum('gi,zxg->zxgi', ao[0], weight1)
        aopair0 = lib.einsum('zxgi,gj->zxgij', aow, ao[0])
        den0 = numpy.einsum('zxgij,ij->zxg', aopair0, dm)
        den1 = numpy.empty((natm,3,weight.size))
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            den1[ja] = numpy.einsum('xgij,ij->xg', aopair1[:,:,p0:p1], dm[p0:p1,:])
            den1[ja]+= numpy.einsum('xgij,ji->xg', aopair1[:,:,p0:p1], dm[:,p0:p1])

        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
        scaled_weights = lib.einsum('azm,mn->azn', Xvec1[:,:,ia], fac_pol)
        scaled_weights *= weight
        aow = numpy.einsum('gi,azg->azgi', ao[0], scaled_weights)
        Bx -= numpy.einsum('gi,azgj->azij', ao[0], aow)
        tmp = numpy.einsum('mn,zxn->zxm', fac_pol, den1)
        psi1[:,:,ia] -= numpy.einsum('mn,zxn->zxm', fac_pol, den0)
        psi1[ia,:,ia] -= tmp.sum(axis=0)
        for ja in range(natm):
            psi1[ja,:,ia] += tmp[ja]

        eta_nj = lib.einsum('mn,m->n', fac_pol, Xvec0[ia])
        Bx -= lib.einsum('n,zxnpq->zxpq', eta_nj, aopair0)
        vtmp = lib.einsum('n,xnpq->xpq', eta_nj, aopair1)
        Bx[ia] -= vtmp
        Bx[ia] -= vtmp.transpose(0,2,1)
        for ja in range(natm):
            shl0, shl1, q0, q1 = aoslices[ja]
            Bx[ja,:,q0:q1,:] += vtmp[:,q0:q1]
            Bx[ja,:,:,q0:q1] += vtmp[:,q0:q1].transpose(0,2,1)

    # L1 correction to the adjoint solution, then the final phi-side term
    psi1 -= numpy.einsum('il,aziljm->azjm', LS0, L1)
    LS1 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi1.reshape(-1,natm*nlm).T)
    LS1 = LS1.T.reshape(natm,3,natm,nlm)

    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
    cav_coords = cav_coords[extern_point_idx]
    fakemol = gto.fakemol_for_charges(cav_coords)
    v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
    v_phi = numpy.zeros((natm, ngrid_1sph, nao, nao))
    v_phi[extern_point_idx] += v_nj.transpose(2,0,1)
    Bx += lib.einsum('azjx,n,xn,jn,jnpq->azpq', LS1, weights_1sph, ylm_1sph, ui, v_phi)
    return Bx
def setUpModule():
    """Build the shared test fixtures.

    Creates an H4 molecule (mol0) plus copies with the first H displaced by
    -dx/+dx along z (mol1/mol2) for central finite differences, and a fixed
    random symmetric density matrix `dm`.
    """
    global dx, mol0, mol1, mol2, nao, dm
    step = 0.0001
    geom = 'H 0 0 %g; H 0 1 1.2; H 1. .1 0; H .5 .5 1'
    mol0 = gto.M(atom=geom % 0, unit='B')
    mol1 = gto.M(atom=geom % (-step), unit='B')
    mol2 = gto.M(atom=geom % step, unit='B')
    dx = step * 2  # total displacement between mol1 and mol2
    nao = mol0.nao_nr()
    numpy.random.seed(1)
    rand = numpy.random.random((nao,nao))
    dm = rand + rand.T
def tearDownModule():
    """Release the module-level fixtures created in setUpModule."""
    global dx, mol0, mol1, mol2, nao, dm
    del dm, nao, mol2, mol1, mol0, dx
class KnownValues(unittest.TestCase):
def test_e_psi1(self):
def get_e_psi1(pcmobj):
pcmobj.grids.build()
mol = pcmobj.mol
natm = mol.natm
lmax = pcmobj.lmax
r_vdw = ddcosmo.get_atomic_radii(pcmobj)
coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, lmax, True))
fi = ddcosmo.make_fi(pcmobj, r_vdw)
ui = 1 - fi
ui[ui<0] = 0
nexposed = numpy.count_nonzero(ui==1)
nbury = numpy.count_nonzero(ui==0)
on_shell = numpy.count_nonzero(ui>0) - nexposed
nlm = (lmax+1)**2
Lmat = ddcosmo.make_L(pcmobj, r_vdw, ylm_1sph, fi)
Lmat = Lmat.reshape(natm*nlm,-1)
cached_pol = ddcosmo.cache_fake_multipoles(pcmobj.grids, r_vdw, lmax)
phi = ddcosmo.make_phi(pcmobj, dm, r_vdw, ui, ylm_1sph)
L_X = numpy.linalg.solve(Lmat, phi.ravel()).reshape(natm,-1)
psi, vmat, L_S = \
ddcosmo.make_psi_vmat(pcmobj, dm, r_vdw, ui, ylm_1sph,
cached_pol, L_X, Lmat)
psi1 = ddcosmo_grad.make_e_psi1(pcmobj, dm, r_vdw, ui, ylm_1sph,
cached_pol, L_X, Lmat)
return L_X, psi, psi1
pcmobj = ddcosmo.DDCOSMO(mol0)
L_X, psi0, psi1 = get_e_psi1(pcmobj)
pcmobj = ddcosmo.DDCOSMO(mol1)
L_X1, psi = | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RegistrationArgs', 'Registration']
@pulumi.input_type
class RegistrationArgs:
    """The set of arguments for constructing a `Registration` resource.

    Generated by the Pulumi SDK Generator; each property below mirrors one
    field of the underlying Cloud Domains `Registration` API resource.
    """
    def __init__(__self__, *,
                 contact_settings: pulumi.Input['ContactSettingsArgs'],
                 domain_name: pulumi.Input[str],
                 yearly_price: pulumi.Input['MoneyArgs'],
                 contact_notices: Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationContactNoticesItem']]]] = None,
                 dns_settings: Optional[pulumi.Input['DnsSettingsArgs']] = None,
                 domain_notices: Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationDomainNoticesItem']]]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 management_settings: Optional[pulumi.Input['ManagementSettingsArgs']] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 validate_only: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a Registration resource.
        :param pulumi.Input['ContactSettingsArgs'] contact_settings: Settings for contact information linked to the `Registration`. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureContactSettings` method.
        :param pulumi.Input[str] domain_name: Immutable. The domain name. Unicode domain names must be expressed in Punycode format.
        :param pulumi.Input['MoneyArgs'] yearly_price: Yearly price to register or renew the domain. The value that should be put here can be obtained from RetrieveRegisterParameters or SearchDomains calls.
        :param pulumi.Input[Sequence[pulumi.Input['RegistrationContactNoticesItem']]] contact_notices: The list of contact notices that the caller acknowledges. The notices needed here depend on the values specified in `registration.contact_settings`.
        :param pulumi.Input['DnsSettingsArgs'] dns_settings: Settings controlling the DNS configuration of the `Registration`. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureDnsSettings` method.
        :param pulumi.Input[Sequence[pulumi.Input['RegistrationDomainNoticesItem']]] domain_notices: The list of domain notices that you acknowledge. Call `RetrieveRegisterParameters` to see the notices that need acknowledgement.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Set of labels associated with the `Registration`.
        :param pulumi.Input['ManagementSettingsArgs'] management_settings: Settings for management of the `Registration`, including renewal, billing, and transfer. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureManagementSettings` method.
        :param pulumi.Input[bool] validate_only: When true, only validation is performed, without actually registering the domain. Follows: https://cloud.google.com/apis/design/design_patterns#request_validation
        """
        # Required arguments are always stored; optional ones only when given.
        pulumi.set(__self__, "contact_settings", contact_settings)
        pulumi.set(__self__, "domain_name", domain_name)
        pulumi.set(__self__, "yearly_price", yearly_price)
        if contact_notices is not None:
            pulumi.set(__self__, "contact_notices", contact_notices)
        if dns_settings is not None:
            pulumi.set(__self__, "dns_settings", dns_settings)
        if domain_notices is not None:
            pulumi.set(__self__, "domain_notices", domain_notices)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if management_settings is not None:
            pulumi.set(__self__, "management_settings", management_settings)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if validate_only is not None:
            pulumi.set(__self__, "validate_only", validate_only)

    @property
    @pulumi.getter(name="contactSettings")
    def contact_settings(self) -> pulumi.Input['ContactSettingsArgs']:
        """
        Settings for contact information linked to the `Registration`. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureContactSettings` method.
        """
        return pulumi.get(self, "contact_settings")

    @contact_settings.setter
    def contact_settings(self, value: pulumi.Input['ContactSettingsArgs']):
        pulumi.set(self, "contact_settings", value)

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """
        Immutable. The domain name. Unicode domain names must be expressed in Punycode format.
        """
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)

    @property
    @pulumi.getter(name="yearlyPrice")
    def yearly_price(self) -> pulumi.Input['MoneyArgs']:
        """
        Yearly price to register or renew the domain. The value that should be put here can be obtained from RetrieveRegisterParameters or SearchDomains calls.
        """
        return pulumi.get(self, "yearly_price")

    @yearly_price.setter
    def yearly_price(self, value: pulumi.Input['MoneyArgs']):
        pulumi.set(self, "yearly_price", value)

    @property
    @pulumi.getter(name="contactNotices")
    def contact_notices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationContactNoticesItem']]]]:
        """
        The list of contact notices that the caller acknowledges. The notices needed here depend on the values specified in `registration.contact_settings`.
        """
        return pulumi.get(self, "contact_notices")

    @contact_notices.setter
    def contact_notices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationContactNoticesItem']]]]):
        pulumi.set(self, "contact_notices", value)

    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> Optional[pulumi.Input['DnsSettingsArgs']]:
        """
        Settings controlling the DNS configuration of the `Registration`. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureDnsSettings` method.
        """
        return pulumi.get(self, "dns_settings")

    @dns_settings.setter
    def dns_settings(self, value: Optional[pulumi.Input['DnsSettingsArgs']]):
        pulumi.set(self, "dns_settings", value)

    @property
    @pulumi.getter(name="domainNotices")
    def domain_notices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationDomainNoticesItem']]]]:
        """
        The list of domain notices that you acknowledge. Call `RetrieveRegisterParameters` to see the notices that need acknowledgement.
        """
        return pulumi.get(self, "domain_notices")

    @domain_notices.setter
    def domain_notices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationDomainNoticesItem']]]]):
        pulumi.set(self, "domain_notices", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Set of labels associated with the `Registration`.
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        # NOTE: no upstream description was generated for this field.
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="managementSettings")
    def management_settings(self) -> Optional[pulumi.Input['ManagementSettingsArgs']]:
        """
        Settings for management of the `Registration`, including renewal, billing, and transfer. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureManagementSettings` method.
        """
        return pulumi.get(self, "management_settings")

    @management_settings.setter
    def management_settings(self, value: Optional[pulumi.Input['ManagementSettingsArgs']]):
        pulumi.set(self, "management_settings", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        # NOTE: no upstream description was generated for this field.
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="validateOnly")
    def validate_only(self) -> Optional[pulumi.Input[bool]]:
        """
        When true, only validation is performed, without actually registering the domain. Follows: https://cloud.google.com/apis/design/design_patterns#request_validation
        """
        return pulumi.get(self, "validate_only")

    @validate_only.setter
    def validate_only(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "validate_only", value)
class Registration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contact_notices: Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationContactNoticesItem']]]] = None,
contact_settings: Optional[pulumi.Input[pulumi.InputType['ContactSettingsArgs']]] = None,
dns_settings: Optional[pulumi.Input[pulumi.InputType['DnsSettingsArgs']]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
domain_notices: Optional[pulumi.Input[Sequence[pulumi.Input['RegistrationDomainNoticesItem']]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
management_settings: Optional[pulumi.Input[pulumi.InputType['ManagementSettingsArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
validate_only: Optional[pulumi.Input[bool]] = None,
yearly_price: Optional[pulumi.Input[pulumi.InputType['MoneyArgs']]] = None,
__props__=None):
"""
Registers a new domain name and creates a corresponding `Registration` resource. Call `RetrieveRegisterParameters` first to check availability of the domain name and determine parameters like price that are needed to build a call to this method. A successful call creates a `Registration` resource in state `REGISTRATION_PENDING`, which resolves to `ACTIVE` within 1-2 minutes, indicating that the domain was successfully registered. If the resource ends up in state `REGISTRATION_FAILED`, it indicates that the domain was not registered successfully, and you can safely delete the resource and retry registration.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input['RegistrationContactNoticesItem']]] contact_notices: The list of contact notices that the caller acknowledges. The notices needed here depend on the values specified in `registration.contact_settings`.
:param pulumi.Input[pulumi.InputType['ContactSettingsArgs']] contact_settings: Settings for contact information linked to the `Registration`. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureContactSettings` method.
:param pulumi.Input[pulumi.InputType['DnsSettingsArgs']] dns_settings: Settings controlling the DNS configuration of the `Registration`. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureDnsSettings` method.
:param pulumi.Input[str] domain_name: Immutable. The domain name. Unicode domain names must be expressed in Punycode format.
:param pulumi.Input[Sequence[pulumi.Input['RegistrationDomainNoticesItem']]] domain_notices: The list of domain notices that you acknowledge. Call `RetrieveRegisterParameters` to see the notices that need acknowledgement.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Set of labels associated with the `Registration`.
:param pulumi.Input[pulumi.InputType['ManagementSettingsArgs']] management_settings: Settings for management of the `Registration`, including renewal, billing, and transfer. You cannot update these with the `UpdateRegistration` method. To update these settings, use the `ConfigureManagementSettings` method.
:param pulumi.Input[bool] validate_only: When true, only validation is performed, without actually registering the domain. Follows: https://cloud.google.com/apis/design/design_patterns#request_validation
:param pulumi.Input[pulumi.InputType['MoneyArgs']] yearly_price: Yearly price to register or renew the domain. The value that should be put here can be obtained from RetrieveRegisterParameters or SearchDomains calls.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RegistrationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Registers a new domain name and creates a corresponding `Registration` resource. Call `RetrieveRegisterParameters` first to check availability of the domain name and determine parameters like price that are needed to build a call to this method. A successful call creates a `Registration` resource in state `REGISTRATION_PENDING`, which resolves to `ACTIVE` within 1-2 minutes, indicating that the domain was successfully registered. If the resource ends up in state `REGISTRATION_FAILED`, it indicates that the domain was not registered successfully, and you can safely delete the resource and retry registration.
        Auto-naming is currently not supported for this resource.

        :param str resource_name: The name of the resource.
        :param RegistrationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Typing-only overload: the dispatching __init__ below does the work.
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegistrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
| |
<reponame>MateuszDabrowski/elquent
#!/usr/bin/env python3.6
# -*- coding: utf8 -*-
'''
ELQuent.campaign
Campaign generator utilizing other modules
Mateusz Dabrowski
github.com/MateuszDabrowski
linkedin.com/in/mateusz-dabrowski-marketing/
'''
# Python imports
import os
import re
import sys
import json
import pyperclip
from colorama import Fore, Style, init
# ELQuent imports
import utils.api.api as api
import utils.helper as helper
import utils.page as page
import utils.mail as mail
# Initialize colorama
init(autoreset=True)
# Globals
# naming and source_country are filled in by country_naming_setter(); the
# remaining campaign values are set by flows elsewhere in the module — TODO confirm.
naming = None
source_country = None
campaign_name = None
converter_choice = None
webinar_epoch = None
product_name = None
header_text = None
# Compiled placeholder patterns; populated by campaign_compile_regex()
regex_asset_name = None
regex_asset_url = None
regex_product_name = None
regex_header_text = None
regex_gtm = None
# Predefined message elements (colorama-styled console snippets)
ERROR = f'{Fore.WHITE}[{Fore.RED}ERROR{Fore.WHITE}] {Fore.YELLOW}'
WARNING = f'{Fore.WHITE}[{Fore.YELLOW}WARNING{Fore.WHITE}] '
SUCCESS = f'{Fore.WHITE}[{Fore.GREEN}SUCCESS{Fore.WHITE}] '
YES = f'{Style.BRIGHT}{Fore.GREEN}y{Fore.WHITE}{Style.NORMAL}'
NO = f'{Style.BRIGHT}{Fore.RED}n{Fore.WHITE}{Style.NORMAL}'
def country_naming_setter(country):
    '''
    Stores the chosen source country for this module and its imported
    helpers, then loads the naming-convention JSON file for that country.

    :param country: two-letter market code used in WK{country} asset names
    '''
    global source_country, naming
    source_country = country
    # Propagate the country code to helper modules that keep their own copy
    page.country_naming_setter(source_country)
    helper.country_naming_setter(source_country)
    # Load the naming convention shared by all campaign flows
    with open(file('naming'), 'r', encoding='utf-8') as naming_file:
        naming = json.load(naming_file)
'''
=================================================================================
File Path Getter
=================================================================================
'''
def file(file_path, name='LP'):
    '''
    Returns the absolute path of a template, API or outcome file.

    :param file_path: key identifying the wanted file (e.g. 'naming', 'lp-template')
    :param name: file-name suffix, used only by the 'outcome-file' entry
    :return: path string, or None for an unknown key
    '''

    def find_data_file(filename, directory='templates'):
        '''
        Returns the correct file path for both the script and the frozen app.
        '''
        # The datadir computation is identical for every directory kind,
        # so it is done once here instead of being repeated per branch.
        if getattr(sys, 'frozen', False):
            # Frozen app: data lives next to the executable
            datadir = os.path.dirname(sys.executable)
        else:
            # Plain script: data lives one level above this module
            datadir = os.path.dirname(os.path.dirname(__file__))
        if directory == 'outcomes':  # For writing outcome files
            return os.path.join(datadir, directory, filename)
        # 'templates' and 'api' files are read from the utils package
        return os.path.join(datadir, 'utils', directory, filename)

    file_paths = {
        'naming': find_data_file('naming.json', directory='api'),
        'jquery': find_data_file('WKCORP_LP_jquery.txt'),
        'simple-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_simple.json'),
        'basic-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_basic.json'),
        'alert-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_alert.json'),
        'alert-ab-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_alert-ab.json'),
        'ebook-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_ebook.json'),
        'code-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_code.json'),
        'demo-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_demo.json'),
        'webinar-campaign': find_data_file(f'WK{source_country}_CAMPAIGN_webinar.json'),
        'field-merge': find_data_file(f'WK{source_country}_VOUCHER_field-merge.json'),
        'asset-eml': find_data_file(f'WK{source_country}_EML_asset.txt'),
        'demo-eml': find_data_file(f'WK{source_country}_EML_demo.txt'),
        'code-eml': find_data_file(f'WK{source_country}_EML_code.txt'),
        'before-webinar-eml': find_data_file(f'WK{source_country}_EML_before-webinar.txt'),
        'lp-template': find_data_file(f'WK{source_country}_LP_template.txt'),
        'ty-lp': find_data_file(f'WK{source_country}_LP_thank-you.txt'),
        'form-design': find_data_file(f'WK{source_country}_FORM_design-template.json'),
        'form-processing': find_data_file(f'WK{source_country}_FORM_processing-template.json'),
        'form-html': find_data_file(f'WK{source_country}_FORM_html-template.txt'),
        'form-css': find_data_file(f'WK{source_country}_FORM_css-template.txt'),
        'outcome-file': find_data_file(f'WK{source_country}_{name}.txt', directory='outcomes')
    }

    return file_paths.get(file_path)
'''
=================================================================================
CAMPAIGN GENERATION HELPER FUNCTIONS
=================================================================================
'''
def campaign_compile_regex():
    '''
    Compiles the placeholder regex patterns shared by the campaign flows
    and publishes them as module-level globals.
    '''
    global regex_asset_name, regex_asset_url, regex_product_name
    global regex_header_text, regex_gtm
    # Placeholder tokens used across the e-mail and landing page templates
    regex_asset_name, regex_asset_url, regex_product_name, regex_header_text, regex_gtm = (
        re.compile(pattern, re.UNICODE)
        for pattern in ('ASSET_NAME', 'ASSET_URL', 'PRODUCT_NAME',
                        'OPTIONAL_TEXT', '<SITE_NAME>'))
    return
def campaign_first_mail(main_lp_url='', mail_html='', camp_name='', ab_test=False, reminder=True):
    '''
    Creates first mail and its reminder
    Returns eloqua id of both

    :param main_lp_url: URL of the main landing page the e-mail should link to
    :param mail_html: ready e-mail HTML, used when no URL is given
    :param camp_name: iterated campaign name elements; falls back to the
        module-level campaign_name when empty
    :param ab_test: when True, the source HTML is split into A/B variants
    :param reminder: when False, only the first e-mail is created
    :return: e-mail id, or (mail_id, reminder_id) when reminder is True;
        False / (False, False) when no e-mail HTML could be built
    '''
    # Creates first mail from package
    if main_lp_url:
        mail_html = mail.mail_constructor(source_country, campaign=main_lp_url)
    elif not mail_html:  # If we don't know target URL nor have html from paste
        mail_html = mail.mail_constructor(source_country, campaign='linkless')
    if not mail_html and not reminder:
        return False
    elif not mail_html and reminder:
        return False, False
    # if there was not iterated campaign name provided, keep standard campaign name
    if not camp_name:
        camp_name = campaign_name
    # Create e-mail
    if ab_test:
        mail_name = ('_'.join(camp_name[0:4]) + '_A-EML')
        reminder_html = mail_html
        # A/B markers split the lead copy: group 1 is variant A, group 4 is B
        lead_regex = re.compile(
            r'<!-- Lead START --> (.*?)( |)&&( |)(.*?) <!-- Lead END -->', re.UNICODE)
        if lead_regex.search(mail_html):
            mail_html = lead_regex.sub(r'\g<1>', mail_html)
            reminder_html = lead_regex.sub(r'\g<4>', reminder_html)
    else:
        mail_name = ('_'.join(camp_name[0:4]) + '_EML')
        # Single variant: strip the markers, keep the lead copy itself
        mail_html = re.sub(
            r'<!-- Lead START --> (.*?) <!-- Lead END -->', r'\g<1>', mail_html)
    mail_id = api.eloqua_create_email(mail_name, mail_html)
    if not reminder:
        return mail_id
    if not ab_test:
        regex_mail_preheader = re.compile(
            r'<!--pre-start.*?pre-end-->', re.UNICODE)
        # Ask the user for the reminder pre-header (typed or from clipboard)
        while True:
            print(f'\n{Fore.YELLOW}»{Fore.WHITE} Write or copypaste {Fore.YELLOW}pre-header{Fore.WHITE} text for',
                  f'{Fore.YELLOW}reminder{Fore.WHITE} e-mail and click [Enter]',
                  f'\n{Fore.WHITE}[S]kip to keep the same pre-header as in main e-mail.')
            reminder_preheader = input(' ')
            if not reminder_preheader:
                # Blank input means the text was copied to the clipboard
                reminder_preheader = pyperclip.paste()
            if not reminder_preheader:
                print(f'\n{ERROR}Pre-header can not be blank')
                continue
            elif len(reminder_preheader) > 140:
                print(f'\n{ERROR}Pre-header is over 140 characters long')
                continue
            else:
                break
        if reminder_preheader.lower() != 's':
            # Wrap the new pre-header in its markers and swap it into the HTML
            reminder_preheader = '<!--pre-start-->' + reminder_preheader + '<!--pre-end-->'
            reminder_html = regex_mail_preheader.sub(
                reminder_preheader, mail_html)
        else:
            # 's' keeps the same pre-header as the main e-mail
            reminder_html = mail_html
    # Create e-mail reminder
    if ab_test:
        reminder_name = ('_'.join(camp_name[0:4]) + '_B-EML')
    else:
        reminder_name = ('_'.join(camp_name[0:4]) + '_REM-EML')
    reminder_id = api.eloqua_create_email(reminder_name, reminder_html)
    return (mail_id, reminder_id)
def campaign_main_page(form_id=''):
    '''
    Builds main landing page with main form in Eloqua
    Returns main LP ID and main Form ID

    :param form_id: id of an existing form to reuse; when empty the user is
        prompted to provide one interactively
    :return: tuple of (main_lp_id, main_lp_url, main_form_id)
    '''
    # Creates form if there is no id from user
    if not form_id:
        print(
            f'\n{Fore.WHITE}» [{Fore.YELLOW}REQUIRED{Fore.WHITE}] Form for main LP', end='')
    form_html = page.modify_form(form_id)
    # Creates LP
    file_name = ('_'.join(campaign_name[1:4]) + '_LP')
    with open(file('lp-template'), 'r', encoding='utf-8') as f:
        code = f.read()
    # Inject the form and shared javascript into the LP template
    code = page.swap_form(code, form_html)
    code = page.javascript(code)
    # Swap the campaign specific placeholders
    code = regex_product_name.sub(product_name, code)
    code = regex_header_text.sub(header_text, code)
    code = regex_gtm.sub(f'WK{source_country}_{file_name}', code)
    # Apply the converter texts configured for the chosen campaign type
    for i in range(len(naming[source_country]['converter']['Placeholders'])):
        placeholder = naming[source_country]['converter']['Placeholders'][i]
        regex_converter = re.compile(rf'{placeholder}', re.UNICODE)
        converter_value = naming[source_country]['converter'][converter_choice][i]
        code = regex_converter.sub(rf'{converter_value}', code)
    # Saves to Outcomes file
    print(
        f'{Fore.WHITE}» [{Fore.YELLOW}SAVING{Fore.WHITE}] WK{source_country}_{file_name}')
    with open(file('outcome-file', file_name), 'w', encoding='utf-8') as f:
        f.write(code)
    # Saves to Eloqua
    main_lp_id, _, main_lp_url = api.eloqua_create_landingpage(file_name, code)
    # Gets main form id for future campaign canvas API calls
    form_id_regex = re.compile(r'id="form(\d+?)"', re.UNICODE)
    main_form_id = form_id_regex.findall(form_html)[0]
    return (main_lp_id, main_lp_url, main_form_id)
def campaign_main_form():
    '''
    Creates the main campaign form in Eloqua.

    Loads the form design template, fills it with the campaign naming data,
    creates the form via the API and injects the returned form id into the
    form's HTML code.

    :return: tuple of (form_html, form_id, form_json)
    '''
    form_name = '_'.join(campaign_name[0:4]) + '_FORM'
    form_html_name = api.eloqua_asset_html_name(form_name)
    folder_id = naming[source_country]['id']['form'].get(
        campaign_name[1])
    # Load the design template and fill in the identifying fields
    with open(file('form-design'), 'r', encoding='utf-8') as design_file:
        form_json = json.load(design_file)
    form_json.update({'name': form_name,
                      'htmlName': form_html_name,
                      'folderId': folder_id})
    # Create the form so Eloqua assigns it an id
    form_id, form_json = api.eloqua_create_form(form_name, form_json)
    # Inject the freshly assigned id into the form HTML
    with open(file('form-html'), 'r', encoding='utf-8') as html_file:
        form_html = html_file.read().replace('FORM_ID', form_id)
    # Push the finished HTML back to the created form
    form_id, form_json = api.eloqua_update_form(form_id, html=form_html)
    return (form_html, form_id, form_json)
def campaign_update_form(form_html, form_id, form_json, asset_mail_id, ty_page_id, from_a_form):
    '''
    Updates main form with asset_mail_id, ty_page_id, form_id, from_a_form, psp, lead_status

    :param form_html: HTML code of the form
    :param form_id: Eloqua id of the form to update
    :param form_json: json definition of the form (source of the field ids)
    :param asset_mail_id: Eloqua id of the asset e-mail sent on submission
    :param ty_page_id: Eloqua id of the thank-you landing page
    :param from_a_form: id of the campaign canvas element receiving submits
    '''
    # Gets CSS Code of the form
    with open(file('form-css'), 'r', encoding='utf-8') as f:
        form_css = f.read()
    # Map each known field htmlName to its Eloqua field id.
    # NOTE(review): if any of these fields is missing from the form, the
    # .replace() chain below raises NameError — presumably all form
    # templates define the full field set; TODO confirm.
    for field in form_json['elements']:
        if field['htmlName'] == 'emailAddress':
            email_field_id = field['id']
        elif field['htmlName'] == 'firstName':
            firstname_field_id = field['id']
        elif field['htmlName'] == 'lastName':
            lastname_field_id = field['id']
        elif field['htmlName'] == 'jobTitleFreeText1':
            jobtitle_field_id = field['id']
        elif field['htmlName'] == 'company':
            company_field_id = field['id']
        elif field['htmlName'] == 'busPhone':
            phone_field_id = field['id']
        elif field['htmlName'] == 'utm_source':
            source_field_id = field['id']
        elif field['htmlName'] == 'utm_campaign':
            detail_field_id = field['id']
        elif field['htmlName'] == 'utm_medium':
            medium_field_id = field['id']
        elif field['htmlName'] == 'utm_content':
            content_field_id = field['id']
        elif field['htmlName'] == 'utm_term':
            term_field_id = field['id']
        elif field['htmlName'] == 'form_url':
            url_field_id = field['id']
        elif field['htmlName'] == 'directMailOptedIn1':
            dataoptin_field_id = field['id']
        elif field['htmlName'] == 'emailOptedIn1':
            emailoptin_field_id = field['id']
        elif field['htmlName'] == 'phoneOptedIn1':
            phoneoptin_field_id = field['id']
    # Gets PSP Cost from name
    # NOTE(review): the cost code is built from different campaign_name
    # elements depending on whether element 4 contains a '/' — verify the
    # naming convention this encodes.
    if '/' in campaign_name[4]:
        psp_element = campaign_name[4].split('/')[1]
        cost_code = f'{psp_element}_{campaign_name[5]}'
    else:
        cost_code = campaign_name[3].split('-')
        cost_code = '-'.join(cost_code[-2:])
    # Gets lead-status for product (must match the WK{country}_Lead prefix)
    while True:
        print(
            f'\n{Fore.YELLOW}»{Fore.WHITE} Write or copypaste {Fore.YELLOW}Lead Status{Fore.WHITE}.')
        lead_status = input(' ')
        if lead_status.startswith(f'WK{source_country}_Lead'):
            break
        else:
            print(f'\n{ERROR}Incorrect Lead Status')
    # Gets and prepares processing steps json of the form
    with open(file('form-processing'), 'r', encoding='utf-8') as f:
        form_processing = json.load(f)
    # Change to string for easy replacing
    form_string = json.dumps(form_processing)
    form_string = form_string\
        .replace('EMAIL_FIELD_ID', email_field_id)\
        .replace('FIRSTNAME_FIELD_ID', firstname_field_id)\
        .replace('LASTNAME_FIELD_ID', lastname_field_id)\
        .replace('COMPANY_FIELD_ID', company_field_id)\
        .replace('JOBTITLE_FIELD_ID', jobtitle_field_id)\
        .replace('PHONE_FIELD_ID', phone_field_id)\
        .replace('SOURCE_FIELD_ID', source_field_id)\
        .replace('DETAIL_FIELD_ID', detail_field_id)\
        .replace('MEDIUM_FIELD_ID', medium_field_id)\
        .replace('CONTENT_FIELD_ID', content_field_id)\
        .replace('TERM_FIELD_ID', term_field_id)\
        .replace('URL_FIELD_ID', url_field_id)\
        .replace('DATAOPTIN_FIELD_ID', dataoptin_field_id)\
        .replace('EMAILOPTIN_FIELD_ID', emailoptin_field_id)\
        .replace('PHONEOPTIN_FIELD_ID', phoneoptin_field_id)\
        .replace('LEAD_STATUS', lead_status)\
        .replace('COST_CODE', cost_code)\
        .replace('CAMPAIGN_ELEMENT_ID', from_a_form)\
        .replace('ASSET_EMAIL_ID', asset_mail_id)\
        .replace('TY_LP_ID', ty_page_id)\
        .replace('FORM_ID', form_id)
    # Change back to json for API call
    form_processing = json.loads(form_string)
    api.eloqua_update_form(
        form_id,
        css=form_css,
        html=form_html,
        processing=form_processing['processingSteps'],
        open_form=True
    )
    return
def campaign_ty_page(asset_name):
    '''
    Builds the thank-you landing page shown after the main form is filled.

    :param asset_name: name of the asset to mention on the page
    :return: Eloqua id of the created thank-you landing page
    '''
    lp_name = '_'.join(campaign_name[1:4]) + '_TY-LP'
    # Load the general TY LP template
    with open(file('ty-lp'), 'r', encoding='utf-8') as template_file:
        code = template_file.read()
    # Swap the campaign specific placeholders
    code = regex_product_name.sub(product_name, code)
    code = regex_header_text.sub(header_text, code)
    code = regex_asset_name.sub(asset_name, code)
    code = regex_gtm.sub(f'WK{source_country}_{lp_name}', code)
    # Apply the converter texts configured for the chosen campaign type
    placeholders = naming[source_country]['converter']['Placeholders']
    converter_values = naming[source_country]['converter'][converter_choice]
    for index, placeholder in enumerate(placeholders):
        converter_regex = re.compile(rf'{placeholder}', re.UNICODE)
        code = converter_regex.sub(rf'{converter_values[index]}', code)
    # Save a local copy to the outcomes folder
    print(
        f'{Fore.WHITE}» [{Fore.YELLOW}SAVING{Fore.WHITE}] WK{source_country}_{lp_name}')
    with open(file('outcome-file', lp_name), 'w', encoding='utf-8') as outcome_file:
        outcome_file.write(code)
    # Publish to Eloqua
    ty_lp_id, _, _ = api.eloqua_create_landingpage(lp_name, code)
    return ty_lp_id
def campaign_asset_mail(asset_name, asset_url):
'''
Creates asset e-mail in Eloqua
Returns asset mail id
'''
file_name = ('_'.join(campaign_name[1:4]) + '_asset-TECH-EML')
with open(file('asset-eml'), 'r', encoding='utf-8') as f:
asset_mail_code = f.read()
if converter_choice == 'Webinar Access':
webinar_string = naming[source_country]['webinar']['dateText']
webinar_string = webinar_string\
.replace('INSERT_DATE', helper.epoch_to_date(webinar_epoch))\
.replace('INSERT_HOUR', helper.epoch_to_time(webinar_epoch))
asset_mail_code = asset_mail_code\
.replace('<em>"ASSET_NAME"</em>',
'<em>"ASSET_NAME"</em>.\n' + webinar_string)
asset_mail_code = regex_product_name.sub(product_name, asset_mail_code)
asset_mail_code = regex_asset_name.sub(asset_name, asset_mail_code)
asset_mail_code = regex_asset_url.sub(asset_url, asset_mail_code)
| |
import numpy as np
import configparser
import json
from datetime import datetime
from datetime import timedelta
import os
import logging
import pyvisa
from pydlcp import arduino_board, hotplate, errors, keithley, impedance_analyzer as ia, datastorage, bts
from apscheduler.schedulers.background import BackgroundScheduler
import platform
from typing import List
# Different data type definitions
ard_list = List[arduino_board.ArduinoBoard]  # Arduino handle per test unit
bts_list = List[bts.BTS]                     # BTS experiment descriptor per unit
hp_list = List[hotplate.Hotplate]            # hotplate handle per test unit
# One CV-sweep record: voltage, capacitance, resistance (all doubles)
vcr_type = np.dtype([('V', 'd'), ('C', 'd'), ('R', 'd')])
# One DLCP record: oscillator level, applied and nominal bias, plus V/C/R readings
dlcp_type = np.dtype([('osc_level', 'd'),
                      ('bias', 'd'),
                      ('nominal_bias', 'd'),
                      ('V', 'd'),
                      ('C', 'd'),
                      ('R', 'd')])
class BTSController:
"""
This class provides methods to control a BTS and DLCP experiment interacting with different instruments and saving
to a h5 data store.
Attributes
----------
_activeCVUnit: List[str]
The unit that the impedance analyzer is currently locked to.
_arduinos:
A list with handles to different ArduinoBoard instances controlling the different test units.
_availableResources: List[str]
A list of available pyvisa resources. Used to verify that the resource given by the constructor is available.
_btsAcquisitions: bts_list
A list with the different BTS instances representing a different experimental BTS condition.
_cvSweepParams: dict
A dictionary with the CV sweep acquisition parameters
_data_paths: List[str]
A list containing the paths where the output data a logs will be saved
debug: bool
True if we want to execute in debugging mode
_configMeasurementRequiredOptions: dict
A dictionary containing rules for validating the acquisition parameters and settings
_configSystemRequiredOptions: dict
A dictionary containing rules for validating the system configuration if it is not in its default state
_dlcpParams: dict
A dictionary with the DLCP acquisition parameters
_finishedUnits: List[int]
A list with the number ids of the units that completed the BTS measurement
_hotPlates: hp_list
A list containing instances of the hotplates that control the temperature on each test unit
_impedanceAnalyzer: ia.ImpedanceAnalyzer
An instance to the impedance analyzer object
_keithley: keithley.Keithley
An instance to the keithley source-meter object
_loggingLevels: dict
A map of logging level strings to integers, as defined in the logging module
_mainLogger: logging.Logger
The logger to the class, used to handle any messages within the class.
_measurementConfig: config.ConfigParser
An instance of ConfigParser that contains all the acquisition parameters
_physicalTestUnits: int
The number of hardware units available for use.
_resourceManager:pyvisa.highlevel.ResourceManager
An instance of pyvisa's resource manager to instantiate the instruments from.
_scheduler: BackgroundScheduler
An instance of the BackgroundScheduler
_schedulerRunning: bool
True if the scheduler is running, false otherwise
_testUnits: int
The number of test units configured to use in the BTS measurement
Methods
-------
ramp_up(self, test_unit: int):
Sets the hotplate temperature to the stress temperature and sets the BTS flag to 'heating_up'. If the unit's
fan is currently on, turns it off.
ramp_down(self, test_unit: int):
Set the hotplate temperature to 25 °C and turns the unit's fan on if it is off. Sets the BTS flag to
'cooling_down'. Disconnects all the pins in the test unit.
start_temperature_log(self, test_unit: int):
Starts the temperature log. Instructs the scheduler to call the method '_log_temperature' according to the
configured setting. Starts the class scheduler if it is not already running. Sets the '_schedulerRunning' to
True.
start_bts(self, test_unit: int):
Starts the bias-temperature stress. This method should be called once the test unit has reached the stress
temperature. It turns on the voltage (connects all pins on the unit to the voltage source and turns the source
on if it is not on already). Set the BTS flag to 'running_stress'. Instructs the scheduler to call the method
'stop_bts' at time 1 stress_interval unit time later than the current time.
stop_bts(self, test_unit: int):
Calls 'ramp_down' method.
_log_temperature(self, test_unit: int):
Measures the temperature and leakage current through all the devices connected in parallel to the keithley
source meter and saves the log to each of the device's datastores.
"""
_activeCVUnit = []
_arduinos: ard_list = []
_btsAcquisitions: bts_list = []
_cvSweepParams: dict = None
_data_paths: List[str] = []
_dlcpParams: dict = None
_finishedUnits: List[int] = []
_hotPlates: hp_list = []
_impedanceAnalyzer: ia.ImpedanceAnalyzer = None
_keithley: keithley.Keithley = None
_loggingLevels = {'NOTSET': logging.NOTSET,
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL}
_mainLogger: logging.Logger = None
_measurementConfig: configparser.ConfigParser = None
_schedulerRunning = False
_testUnits: int = 0
def __init__(self, config_file_url: str, **kwargs):
if not isinstance(config_file_url, str):
raise TypeError('The first argument should be an instance of str.')
self.debug: bool = kwargs.get('debug', False)
system_option_requirements_json = kwargs.get('system_option_requirements_json',
'system_config_required_options.json')
measurement_option_requirements_json = kwargs.get('measurement_options_requirements_json',
'measurement_config_required_options.json')
# Load validation rules for the system configuration file
self._configSystemRequiredOptions = self._read_json_file(system_option_requirements_json)
# Load validation rules for the measurement configuration file
self._configMeasurementRequiredOptions = self._read_json_file(measurement_option_requirements_json)
# Load the system configuration file
config = configparser.ConfigParser()
config.read(config_file_url)
self._physicalTestUnits = config.getint(section='global', option='test_units')
# If the system configuration file is valid, then store it in the object
if self._validate_config(config, self._configSystemRequiredOptions):
self._systemConfig = config
self._resourceManager: pyvisa.highlevel.ResourceManager = pyvisa.highlevel.ResourceManager()
self._availableResources = self._resourceManager.list_resources()
self._scheduler: BackgroundScheduler = BackgroundScheduler()
def ramp_up(self, test_unit: int):
bts_acquisition: bts.BTS = self._btsAcquisitions[test_unit]
hp: hotplate.Hotplate = self._hotPlates[test_unit]
hp.set_temperature(int(bts_acquisition.temperature))
a: arduino_board.ArduinoBoard = self._arduinos[test_unit]
if a.fan_status:
a.fan_off()
a.disconnect_all_pins()
bts_acquisition.status = 'heating_up'
def ramp_down(self, test_unit: int):
hp: hotplate.Hotplate = self._hotPlates[test_unit]
hp.set_temperature(25)
a: arduino_board.ArduinoBoard = self._arduinos[test_unit]
if not a.fan_status:
a.fan_on()
a.disconnect_all_pins()
bts_acquisition: bts.BTS = self._btsAcquisitions[test_unit]
bts_acquisition.status = 'cooling_down'
def start_temperature_log(self, test_unit: int):
bts_acquisition: bts.BTS = self._btsAcquisitions[test_unit]
self._scheduler.add_job(func=self._log_temperature, trigger='interval', args=[test_unit],
seconds=bts_acquisition.temperature_sampling_interval,
id='temperature_log_unit{0}'.format(test_unit))
if not self._schedulerRunning:
self._scheduler.start()
self._schedulerRunning = True
def start_bts(self, test_unit: int):
bts_acquisition: bts.BTS = self._btsAcquisitions[test_unit]
a: arduino_board.ArduinoBoard = self._arduinos[test_unit]
a.connect_all_pins()
a.connect_keithley()
# If the voltage source is off, turn it on
if not self._keithley.source_on:
self._keithley.turn_source_on()
now: datetime = datetime.now()
later = now + timedelta(seconds=bts_acquisition.stress_interval)
self._scheduler.add_job(func=self.stop_bts, trigger='date', args=[test_unit],
date=later, id='bts_unit{0}'.format(test_unit))
bts_acquisition.status = 'running_stress'
bts_acquisition.start_bts_interval = now
if not self._schedulerRunning:
self._scheduler.start()
self._schedulerRunning = True
def stop_bts(self, test_unit: int):
# Maybe we need to do something else here, else change all method calls to just, ramp_down.
self.ramp_down(test_unit=test_unit)
def _log_temperature(self, test_unit: int):
a: arduino_board.ArduinoBoard = self._arduinos[test_unit]
now = datetime.now()
temperature = a.temperature
current = self._keithley.current
bts_acquisition: bts.BTS = self._btsAcquisitions[test_unit]
clean_devices = bts_acquisition.clean_devices
contaminated_devices = bts_acquisition.contaminated_devices
dt = bts_acquisition.time_delta_total(current_datetime=now)
for device in clean_devices:
h5_storage: datastorage.BTSH5Store = bts_acquisition.get_device_storage(device=device, clean=True)
h5_storage.log_temperature_current(time=dt,
temperature=temperature,
current=current)
for device in contaminated_devices:
h5_storage: datastorage.BTSH5Store = bts_acquisition.get_device_storage(device=device, clean=False)
h5_storage.log_temperature_current(time=dt,
temperature=temperature,
current=current)
def start_measurement(self):
now = datetime.now()
for test_unit in range(self._testUnits):
self._log_cv_sweep(test_unit=test_unit)
status_job_id = 'check_status{0}'.format(test_unit)
self._scheduler.add_job(func=self._check_status, trigger='interval', args=[test_unit], id=status_job_id,
seconds=10)
self.start_temperature_log(test_unit=test_unit)
if not self._schedulerRunning:
self._scheduler.start()
self._schedulerRunning = True
def _check_status(self, test_unit: int):
a: arduino_board.ArduinoBoard = self._arduinos[test_unit]
bts_acquisition: bts.BTS = self._btsAcquisitions[test_unit]
now = datetime.now()
temperature = a.temperature
# If the device temperature equals the target temperature and the status is ramping up, start stress
if np.isclose(a.temperature, bts_acquisition.temperature,
atol=2) and bts_acquisition.status == 'heating_up':
self.start_bts(test_unit=test_unit)
elif a.temperature <= 26 and bts_acquisition.status == 'cooling_down':
dt = bts_acquisition.time_delta_bts(current_datetime=datetime.now())
bts_acquisition.accumulate_interval(dt=dt)
bts_acquisition.status = "idle"
self._log_cv_sweep(test_unit=test_unit)
elif bts_acquisition.status == 'finished':
self._unit_finished(test_unit=test_unit)
def _unit_finished(self, test_unit: int):
a: arduino_board.ArduinoBoard = self._arduinos[test_unit]
if (a.temperature > 30) and (not a.fan_status):
a.fan_on()
elif a.fan_status:
a.fan_off()
# Stop logging temperature
self._scheduler.remove_job(job_id='temperature_log_unit{0}'.format(test_unit))
status_job_id = 'check_status{0}'.format(test_unit)
# Stop checking if the unit is ready
self._scheduler.remove_job(job_id=status_job_id)
if test_unit not in self._finishedUnits:
self._finishedUnits.append(test_unit)
if len(self._finishedUnits) == self._testUnits:
self._scheduler.shutdown()
self._schedulerRunning = False
def _log_cv_sweep(self, test_unit: int):
# Make sure the board is idle
bts_acquisition: bts.BTS = self._btsAcquisitions[test_unit]
# If the current callback is in a scheduler queue, remove the job from the scheduler
job_id = 'bts_cv_test_unit{0}'.format(test_unit)
if job_id in self._scheduler.get_jobs():
self._scheduler.remove_job(job_id=job_id)
# Check if the measurement is not finished
if bts_acquisition.status == "idle":
# If the impedance analyzer is busy, schedule this measurement for later
if len(self._activeCVUnit) > 0 or self._impedanceAnalyzer.status == "running":
next_date = datetime.now() + timedelta(seconds=self._impedanceAnalyzer.wait_time)
self._scheduler.add_job(self._log_cv_sweep(test_unit=test_unit),
trigger='date', run_date=next_date,
args=[test_unit], id=job_id)
else:
bts_acquisition.status = "running_cv"
# Lock the Impedance Analyzer measurement to the test_unit
self._activeCVUnit.append(test_unit)
# Get the arduino for the selected board
a: arduino_board.ArduinoBoard = self._arduinos[test_unit]
if a.keithley_connected:
a.disconnect_keithley()
if a.fan_status: # If fan is on
a.fan_off()
# Make sure no other pins are connected to the impedance analyzer
for i in range(self._testUnits):
ai: arduino_board.ArduinoBoard = self._arduinos[i]
b: bts.BTS = self._btsAcquisitions[i]
if b.status != 'running_stress':
ai.disconnect_all_pins()
# loop over all clean pins
for d, p in zip(bts_acquisition.clean_devices, bts_acquisition.clean_pins):
# turn the pin on
a.pin_on(pin_number=p)
# collect the data from the impedance analyzer
data: vcr_type = self._impedanceAnalyzer.cv_sweep(
voltage_start=float(self._cvSweepParams['voltage_start']),
voltage_step=float(self._cvSweepParams['voltage_step']),
voltage_stop=float(self._cvSweepParams['voltage_stop']),
frequency=float(self._cvSweepParams['frequency']),
integration_time=self._cvSweepParams['integration_time'],
noa=int(self._cvSweepParams['number_of_averages']),
osc_amplitude=float(self._cvSweepParams['osc_amplitude']),
sweep_direction=self._cvSweepParams['sweep_direction']
)
| |
from template import Template
from template.test import TestCase, main
class Foo:
    """Test fixture whose keyword items render as a Perl-ish hash string."""

    def __init__(self, **kwargs):
        self.__dict = kwargs

    def present(self, view):
        """Render the items sorted by key, ascending."""
        pairs = sorted(self.__dict.items())
        body = ", ".join("%s => %s" % pair for pair in pairs)
        return "{ %s }" % body

    def reverse(self, view):
        """Render the items sorted by key, descending."""
        pairs = sorted(self.__dict.items(), reverse=True)
        body = ", ".join("%s => %s" % pair for pair in pairs)
        return "{ %s }" % body
class MyList:
    """Test fixture wrapping its positional args, exposed via as_list()."""

    def __init__(self, *args):
        self.__args = args

    def as_list(self):
        # Note: returns the stored tuple as-is (name kept for compatibility).
        return self.__args
class ViewTest(TestCase):
    """Exercise the Template Toolkit View plugin against the DATA cases."""

    def testView(self):
        # Renamed from 'vars', which shadowed the builtin of the same name.
        template_vars = {"foo": Foo(pi=3.14, e=2.718),
                         "blessed_list": MyList("Hello", "World")}
        t = Template()
        context = t.context()
        # A view can be created without arguments...
        view = context.view()
        self.assert_(view)
        # ...or with a configuration dict whose entries become accessors.
        view = context.view({"prefix": "my"})
        self.assert_(view)
        self.assertEquals("my", view.prefix())
        self.Expect(DATA, None, template_vars)
DATA = r"""
-- test --
[% USE v = View -%]
[[% v.prefix %]]
-- expect --
[]
-- test --
[% USE v = View( map => { default="any" } ) -%]
[[% v.map.default %]]
-- expect --
[any]
-- test --
[% USE view( prefix=> 'foo/', suffix => '.tt2') -%]
[[% view.prefix %]bar[% view.suffix %]]
[[% view.template_name('baz') %]]
-- expect --
[foo/bar.tt2]
[foo/baz.tt2]
-- test --
[% USE view( prefix=> 'foo/', suffix => '.tt2') -%]
[[% view.prefix %]bar[% view.suffix %]]
[[% view.template_name('baz') %]]
-- expect --
[foo/bar.tt2]
[foo/baz.tt2]
-- test --
[% USE view -%]
[% view.print('Hello World') %]
[% BLOCK text %]TEXT: [% item %][% END -%]
-- expect --
TEXT: Hello World
-- test --
[% USE view -%]
[% view.print( { foo => 'bar' } ) %]
[% BLOCK hash %]HASH: {
[% FOREACH key = item.keys.sort -%]
[% key %] => [% item.$key %]
[%- END %]
}
[% END -%]
-- expect --
HASH: {
foo => bar
}
-- test --
[% USE view -%]
[% view = view.clone( prefix => 'my_' ) -%]
[% view.view('hash', { bar => 'baz' }) %]
[% BLOCK my_hash %]HASH: {
[% FOREACH key = item.keys.sort -%]
[% key %] => [% item.$key %]
[%- END %]
}
[% END -%]
-- expect --
HASH: {
bar => baz
}
-- test --
[% USE view(prefix='my_') -%]
[% view.print( foo => 'wiz', bar => 'waz' ) %]
[% BLOCK my_hash %]KEYS: [% item.keys.sort.join(', ') %][% END %]
-- expect --
KEYS: bar, foo
-- test --
[% USE view -%]
[% view.print( view ) %]
[% BLOCK View %]Printing a View object[% END -%]
-- expect --
Printing a View object
-- test --
[% USE view(prefix='my_') -%]
[% view.print( view ) %]
[% view.print( view, prefix='your_' ) %]
[% BLOCK my_View %]Printing my View object[% END -%]
[% BLOCK your_View %]Printing your View object[% END -%]
-- expect --
Printing my View object
Printing your View object
-- test --
[% USE view(prefix='my_', notfound='any' ) -%]
[% view.print( view ) %]
[% view.print( view, prefix='your_' ) %]
[% BLOCK my_any %]Printing any of my objects[% END -%]
[% BLOCK your_any %]Printing any of your objects[% END -%]
-- expect --
Printing any of my objects
Printing any of your objects
-- test --
[% USE view(prefix => 'my_', map => { default => 'catchall' } ) -%]
[% view.print( view ) %]
[% view.print( view, default="catchsome" ) %]
[% BLOCK my_catchall %]Catching all defaults[% END -%]
[% BLOCK my_catchsome %]Catching some defaults[% END -%]
-- expect --
Catching all defaults
Catching some defaults
-- test --
[% USE view(prefix => 'my_', map => { default => 'catchnone' } ) -%]
[% view.default %]
[% view.default = 'catchall' -%]
[% view.default %]
[% view.print( view ) %]
[% view.print( view, default="catchsome" ) %]
[% BLOCK my_catchall %]Catching all defaults[% END -%]
[% BLOCK my_catchsome %]Catching some defaults[% END -%]
-- expect --
catchnone
catchall
Catching all defaults
Catching some defaults
-- test --
[% USE view(prefix='my_', default='catchall' notfound='lost') -%]
[% view.print( view ) %]
[% BLOCK my_lost %]Something has been found[% END -%]
-- expect --
Something has been found
-- test --
[% USE view -%]
[% TRY ;
view.print( view ) ;
CATCH view ;
"[$error.type] $error.info" ;
END
%]
-- expect --
[view] file error - View: not found
-- test --
[% USE view -%]
[% view.print( foo ) %]
-- expect --
{ e => 2.718, pi => 3.14 }
-- test --
[% USE view -%]
[% view.print( foo, method => 'reverse' ) %]
-- expect --
{ pi => 3.14, e => 2.718 }
-- test --
[% USE view(prefix='my_', include_naked=0, view_naked=1) -%]
[% BLOCK my_foo; "Foo: $item"; END -%]
[[% view.view_foo(20) %]]
[[% view.foo(30) %]]
-- expect --
[Foo: 20]
[Foo: 30]
-- test --
[% USE view(prefix='my_', include_naked=0, view_naked=0) -%]
[% BLOCK my_foo; "Foo: $item"; END -%]
[[% view.view_foo(20) %]]
[% TRY ;
view.foo(30) ;
CATCH ;
error.info ;
END
%]
-- expect --
[Foo: 20]
no such view member: foo
-- test --
[% USE view(map => { HASH => 'my_hash', ARRAY => 'your_list' }) -%]
[% BLOCK text %]TEXT: [% item %][% END -%]
[% BLOCK my_hash %]HASH: [% item.keys.sort.join(', ') %][% END -%]
[% BLOCK your_list %]LIST: [% item.join(', ') %][% END -%]
[% view.print("some text") %]
[% view.print({ alpha => 'a', bravo => 'b' }) %]
[% view.print([ 'charlie', 'delta' ]) %]
-- expect --
TEXT: some text
HASH: alpha, bravo
LIST: charlie, delta
-- test --
[% USE view(item => 'thing',
map => { HASH => 'my_hash', ARRAY => 'your_list' }) -%]
[% BLOCK text %]TEXT: [% thing %][% END -%]
[% BLOCK my_hash %]HASH: [% thing.keys.sort.join(', ') %][% END -%]
[% BLOCK your_list %]LIST: [% thing.join(', ') %][% END -%]
[% view.print("some text") %]
[% view.print({ alpha => 'a', bravo => 'b' }) %]
[% view.print([ 'charlie', 'delta' ]) %]
-- expect --
TEXT: some text
HASH: alpha, bravo
LIST: charlie, delta
-- test --
[% USE view -%]
[% view.print('Hello World') %]
[% view1 = view.clone( prefix='my_') -%]
[% view1.print('Hello World') %]
[% view2 = view1.clone( prefix='dud_', notfound='no_text' ) -%]
[% view2.print('Hello World') %]
[% BLOCK text %]TEXT: [% item %][% END -%]
[% BLOCK my_text %]MY TEXT: [% item %][% END -%]
[% BLOCK dud_no_text %]NO TEXT: [% item %][% END -%]
-- expect --
TEXT: Hello World
MY TEXT: Hello World
NO TEXT: Hello World
-- test --
[% USE view( prefix = 'base_', default => 'any' ) -%]
[% view1 = view.clone( prefix => 'one_') -%]
[% view2 = view.clone( prefix => 'two_') -%]
[% view.default %] / [% view.map.default %]
[% view1.default = 'anyone' -%]
[% view1.default %] / [% view1.map.default %]
[% view2.map.default = 'anytwo' -%]
[% view2.default %] / [% view2.map.default %]
[% view.print("Hello World") %] / [% view.print(blessed_list) %]
[% view1.print("Hello World") %] / [% view1.print(blessed_list) %]
[% view2.print("Hello World") %] / [% view2.print(blessed_list) %]
[% BLOCK base_text %]ANY TEXT: [% item %][% END -%]
[% BLOCK one_text %]ONE TEXT: [% item %][% END -%]
[% BLOCK two_text %]TWO TEXT: [% item %][% END -%]
[% BLOCK base_any %]BASE ANY: [% item.as_list.join(', ') %][% END -%]
[% BLOCK one_anyone %]ONE ANY: [% item.as_list.join(', ') %][% END -%]
[% BLOCK two_anytwo %]TWO ANY: [% item.as_list.join(', ') %][% END -%]
-- expect --
any / any
anyone / anyone
anytwo / anytwo
ANY TEXT: Hello World / BASE ANY: Hello, World
ONE TEXT: Hello World / ONE ANY: Hello, World
TWO TEXT: Hello World / TWO ANY: Hello, World
-- test --
[% USE view( prefix => 'my_', item => 'thing' ) -%]
[% view.view('thingy', [ 'foo', 'bar'] ) %]
[% BLOCK my_thingy %]thingy: [ [% thing.join(', ') %] ][%END %]
-- expect --
thingy: [ foo, bar ]
-- test --
[% USE view -%]
[% view.map.${'View'} = 'myview' -%]
[% view.print(view) %]
[% BLOCK myview %]MYVIEW[% END%]
-- expect --
MYVIEW
-- test --
[% USE view -%]
[% view.include('greeting', msg => 'Hello World!') %]
[% BLOCK greeting %]msg: [% msg %][% END -%]
-- expect --
msg: Hello World!
-- test --
[% USE view( prefix="my_" )-%]
[% view.include('greeting', msg => 'Hello World!') %]
[% BLOCK my_greeting %]msg: [% msg %][% END -%]
-- expect --
msg: Hello World!
-- test --
[% USE view( prefix="my_" )-%]
[% view.include_greeting( msg => 'Hello World!') %]
[% BLOCK my_greeting %]msg: [% msg %][% END -%]
-- expect --
msg: Hello World!
-- test --
[% USE view( prefix="my_" )-%]
[% INCLUDE $view.template('greeting')
msg = 'Hello World!' %]
[% BLOCK my_greeting %]msg: [% msg %][% END -%]
-- expect --
msg: Hello World!
-- test --
[% USE view( title="My View" )-%]
[% view.title %]
-- expect --
My View
-- test --
[% USE view( title="My View" )-%]
[% newview = view.clone( col = 'Chartreuse') -%]
[% newerview = newview.clone( title => 'New Title' ) -%]
[% view.title %]
[% newview.title %]
[% newview.col %]
[% newerview.title %]
[% newerview.col %]
-- expect --
My View
My View
Chartreuse
New Title
Chartreuse
#------------------------------------------------------------------------
-- test --
[% VIEW fred prefix='blat_' %]
This is the view
[% END -%]
[% BLOCK blat_foo; 'This is blat_foo'; END -%]
[% fred.view_foo %]
-- expect --
This is blat_foo
-- test --
[% VIEW fred %]
This is the view
[% view.prefix = 'blat_' %]
[% END -%]
[% BLOCK blat_foo; 'This is blat_foo'; END -%]
[% fred.view_foo %]
-- expect --
This is blat_foo
-- test --
[% VIEW fred %]
This is the view
[% view.prefix = 'blat_' %]
[% view.thingy = 'bloop' %]
[% fred.name = 'Freddy' %]
[% END -%]
[% fred.prefix %]
[% fred.thingy %]
[% fred.name %]
-- expect --
blat_
bloop
Freddy
-- test --
[% VIEW fred prefix='blat_'; view.name='Fred'; END -%]
[% | |
Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
    @availability_schedule_name.setter
    def availability_schedule_name(self, value=None):
        """Corresponds to IDD field `Availability Schedule Name`"""
        # Assignment goes through DataObject item access, which validates.
        self["Availability Schedule Name"] = value
    # The accessors below are auto-generated wrappers over DataObject item
    # access: each getter returns self[<IDD field name>] and each setter
    # assigns it, so type/range validation happens in the base class.
    @property
    def direct_pad_area(self):
        """field `Direct Pad Area`
        | Units: m2
        | Default value: "Autosize"
        Args:
            value (float or "Autosize"): value for IDD Field `Direct Pad Area`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float or "Autosize": the value of `direct_pad_area` or None if not set
        """
        return self["Direct Pad Area"]

    @direct_pad_area.setter
    def direct_pad_area(self, value="Autosize"):
        """Corresponds to IDD field `Direct Pad Area`"""
        self["Direct Pad Area"] = value

    @property
    def direct_pad_depth(self):
        """field `Direct Pad Depth`
        | Units: m
        | Default value: "Autosize"
        Args:
            value (float or "Autosize"): value for IDD Field `Direct Pad Depth`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float or "Autosize": the value of `direct_pad_depth` or None if not set
        """
        return self["Direct Pad Depth"]

    @direct_pad_depth.setter
    def direct_pad_depth(self, value="Autosize"):
        """Corresponds to IDD field `Direct Pad Depth`"""
        self["Direct Pad Depth"] = value

    @property
    def recirculating_water_pump_power_consumption(self):
        """field `Recirculating Water Pump Power Consumption`
        | Units: W
        | IP-Units: W
        Args:
            value (float): value for IDD Field `Recirculating Water Pump Power Consumption`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `recirculating_water_pump_power_consumption` or None if not set
        """
        return self["Recirculating Water Pump Power Consumption"]

    @recirculating_water_pump_power_consumption.setter
    def recirculating_water_pump_power_consumption(self, value=None):
        """Corresponds to IDD field `Recirculating Water Pump Power
        Consumption`"""
        self["Recirculating Water Pump Power Consumption"] = value

    @property
    def secondary_air_fan_flow_rate(self):
        """field `Secondary Air Fan Flow Rate`
        | Units: m3/s
        Args:
            value (float): value for IDD Field `Secondary Air Fan Flow Rate`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `secondary_air_fan_flow_rate` or None if not set
        """
        return self["Secondary Air Fan Flow Rate"]

    @secondary_air_fan_flow_rate.setter
    def secondary_air_fan_flow_rate(self, value=None):
        """Corresponds to IDD field `Secondary Air Fan Flow Rate`"""
        self["Secondary Air Fan Flow Rate"] = value

    @property
    def secondary_air_fan_total_efficiency(self):
        """field `Secondary Air Fan Total Efficiency`
        | value <= 1.0
        Args:
            value (float): value for IDD Field `Secondary Air Fan Total Efficiency`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `secondary_air_fan_total_efficiency` or None if not set
        """
        return self["Secondary Air Fan Total Efficiency"]

    @secondary_air_fan_total_efficiency.setter
    def secondary_air_fan_total_efficiency(self, value=None):
        """Corresponds to IDD field `Secondary Air Fan Total Efficiency`"""
        self["Secondary Air Fan Total Efficiency"] = value

    @property
    def secondary_air_fan_delta_pressure(self):
        """field `Secondary Air Fan Delta Pressure`
        | Units: Pa
        | IP-Units: inH2O
        Args:
            value (float): value for IDD Field `Secondary Air Fan Delta Pressure`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `secondary_air_fan_delta_pressure` or None if not set
        """
        return self["Secondary Air Fan Delta Pressure"]

    @secondary_air_fan_delta_pressure.setter
    def secondary_air_fan_delta_pressure(self, value=None):
        """Corresponds to IDD field `Secondary Air Fan Delta Pressure`"""
        self["Secondary Air Fan Delta Pressure"] = value

    @property
    def indirect_heat_exchanger_effectiveness(self):
        """field `Indirect Heat Exchanger Effectiveness`
        Args:
            value (float): value for IDD Field `Indirect Heat Exchanger Effectiveness`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `indirect_heat_exchanger_effectiveness` or None if not set
        """
        return self["Indirect Heat Exchanger Effectiveness"]

    @indirect_heat_exchanger_effectiveness.setter
    def indirect_heat_exchanger_effectiveness(self, value=None):
        """Corresponds to IDD field `Indirect Heat Exchanger Effectiveness`"""
        self["Indirect Heat Exchanger Effectiveness"] = value

    @property
    def primary_air_inlet_node_name(self):
        """field `Primary Air Inlet Node Name`
        Args:
            value (str): value for IDD Field `Primary Air Inlet Node Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `primary_air_inlet_node_name` or None if not set
        """
        return self["Primary Air Inlet Node Name"]

    @primary_air_inlet_node_name.setter
    def primary_air_inlet_node_name(self, value=None):
        """Corresponds to IDD field `Primary Air Inlet Node Name`"""
        self["Primary Air Inlet Node Name"] = value

    @property
    def primary_air_outlet_node_name(self):
        """field `Primary Air Outlet Node Name`
        Args:
            value (str): value for IDD Field `Primary Air Outlet Node Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `primary_air_outlet_node_name` or None if not set
        """
        return self["Primary Air Outlet Node Name"]

    @primary_air_outlet_node_name.setter
    def primary_air_outlet_node_name(self, value=None):
        """Corresponds to IDD field `Primary Air Outlet Node Name`"""
        self["Primary Air Outlet Node Name"] = value

    @property
    def control_type(self):
        """field `Control Type`
        | This field is not currently used and can be left blank
        Args:
            value (str): value for IDD Field `Control Type`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `control_type` or None if not set
        """
        return self["Control Type"]

    @control_type.setter
    def control_type(self, value=None):
        """Corresponds to IDD field `Control Type`"""
        self["Control Type"] = value

    @property
    def water_supply_storage_tank_name(self):
        """field `Water Supply Storage Tank Name`
        Args:
            value (str): value for IDD Field `Water Supply Storage Tank Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `water_supply_storage_tank_name` or None if not set
        """
        return self["Water Supply Storage Tank Name"]

    @water_supply_storage_tank_name.setter
    def water_supply_storage_tank_name(self, value=None):
        """Corresponds to IDD field `Water Supply Storage Tank Name`"""
        self["Water Supply Storage Tank Name"] = value

    @property
    def secondary_air_inlet_node_name(self):
        """field `Secondary Air Inlet Node Name`
        | Enter the name of an outdoor air node
        Args:
            value (str): value for IDD Field `Secondary Air Inlet Node Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `secondary_air_inlet_node_name` or None if not set
        """
        return self["Secondary Air Inlet Node Name"]

    @secondary_air_inlet_node_name.setter
    def secondary_air_inlet_node_name(self, value=None):
        """Corresponds to IDD field `Secondary Air Inlet Node Name`"""
        self["Secondary Air Inlet Node Name"] = value
class EvaporativeCoolerIndirectWetCoil(DataObject):
""" Corresponds to IDD object `EvaporativeCooler:Indirect:WetCoil`
Indirect evaporative cooler with wetted coil, recirculating water pump, and secondary
air fan. This model has no controls other than its availability schedule.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'coil maximum efficiency',
{'name': u'Coil Maximum Efficiency',
'pyname': u'coil_maximum_efficiency',
'maximum': 1.0,
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real'}),
(u'coil flow ratio',
{'name': u'Coil Flow Ratio',
'pyname': u'coil_flow_ratio',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'recirculating water pump power consumption',
{'name': u'Recirculating Water Pump Power Consumption',
'pyname': u'recirculating_water_pump_power_consumption',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'W'}),
(u'secondary air fan flow rate',
{'name': u'Secondary Air Fan Flow Rate',
'pyname': u'secondary_air_fan_flow_rate',
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm3/s'}),
(u'secondary air fan total efficiency',
{'name': u'Secondary Air Fan Total Efficiency',
'pyname': u'secondary_air_fan_total_efficiency',
'minimum>': 0.0,
'maximum': 1.0,
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'secondary air fan delta pressure',
{'name': u'Secondary Air Fan Delta Pressure',
'pyname': u'secondary_air_fan_delta_pressure',
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'Pa'}),
(u'primary air inlet node name',
{'name': u'Primary Air Inlet Node Name',
'pyname': u'primary_air_inlet_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'primary air outlet node name',
{'name': u'Primary Air Outlet Node Name',
'pyname': u'primary_air_outlet_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'control type',
{'name': u'Control Type',
'pyname': u'control_type',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'water supply storage tank name',
{'name': u'Water Supply Storage Tank Name',
'pyname': u'water_supply_storage_tank_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'secondary air inlet node name',
{'name': u'Secondary Air Inlet Node Name',
'pyname': u'secondary_air_inlet_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Evaporative Coolers',
'min-fields': 13,
'name': u'EvaporativeCooler:Indirect:WetCoil',
'pyname': u'EvaporativeCoolerIndirectWetCoil',
'required-object': False,
'unique-object': False}
    @property
    def name(self):
        """field `Name`
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        # Assignment goes through DataObject item access, which validates.
        self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
| |
# -*- coding: utf-8 -*-
'''
Return the results of a highstate (or any other state function that returns
data in a compatible format) via an HTML email or HTML file.
.. versionadded:: 2017.7.0
Similar results can be achieved by using the smtp returner with a custom template,
except an attempt at writing such a template for the complex data structure
returned by highstate function had proven to be a challenge, not to mention
that the smtp module doesn't support sending HTML mail at the moment.
The main goal of this returner was to produce an easy to read email similar
to the output of highstate outputter used by the CLI.
This returner could be very useful during scheduled executions,
but could also be useful for communicating the results of a manual execution.
Returner configuration is controlled in a standard fashion either via
highstate group or an alternatively named group.
.. code-block:: bash
salt '*' state.highstate --return highstate
To use the alternative configuration, append '--return_config config-name'
.. code-block:: bash
salt '*' state.highstate --return highstate --return_config simple
Here is an example of what the configuration might look like:
.. code-block:: yaml
simple.highstate:
report_failures: True
report_changes: True
report_everything: False
failure_function: pillar.items
success_function: pillar.items
report_format: html
report_delivery: smtp
smtp_success_subject: 'success minion {id} on host {host}'
smtp_failure_subject: 'failure minion {id} on host {host}'
smtp_server: smtp.example.com
smtp_recipients: <EMAIL>, <EMAIL>
smtp_sender: <EMAIL>
The *report_failures*, *report_changes*, and *report_everything* flags provide
filtering of the results. If you want an email to be sent every time, then
*report_everything* is your choice. If you want to be notified only when
changes were successfully made use *report_changes*. And *report_failures* will
generate an email if there were failures.
The configuration allows you to run a salt module function in case of
success (*success_function*) or failure (*failure_function*).
Any salt function, including ones defined in the _module folder of your salt
repo, could be used here and its output will be displayed under the 'extra'
heading of the email.
Supported values for *report_format* are html, json, and yaml. The latter two
are typically used for debugging purposes, but could be used for applying
a template at some later stage.
The values for *report_delivery* are smtp or file. In case of file delivery
the only other applicable option is *file_output*.
In case of smtp delivery, smtp_* options demonstrated by the example above
could be used to customize the email.
As you might have noticed, the success and failure subjects contain {id} and {host}
values. Any other grain name could be used. As opposed to using
{{grains['id']}}, which will be rendered by the master and contain master's
values at the time of pillar generation, these will contain minion values at
the time of execution.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import smtplib
import cgi
from email.mime.text import MIMEText
from salt.ext.six.moves import range
from salt.ext.six.moves import StringIO
from salt.ext import six
import salt.utils.files
import salt.utils.json
import salt.utils.stringutils
import salt.utils.yaml
import salt.returners
log = logging.getLogger(__name__)
__virtualname__ = 'highstate'
def __virtual__():
'''
Return our name
'''
return __virtualname__
def _get_options(ret):
    '''
    Collect this returner's configuration via the standard salt.returners
    lookup (the ``highstate`` group or an alternatively named group).
    '''
    # Every supported option maps straight through under its own name.
    option_names = (
        'report_everything',
        'report_changes',
        'report_failures',
        'failure_function',
        'success_function',
        'report_format',
        'report_delivery',
        'file_output',
        'smtp_sender',
        'smtp_recipients',
        'smtp_failure_subject',
        'smtp_success_subject',
        'smtp_server',
    )
    attrs = {name: name for name in option_names}
    return salt.returners.get_returner_options(
        __virtualname__,
        ret,
        attrs,
        __salt__=__salt__,
        __opts__=__opts__)
#
# Most email readers do not support the <style> tag.
# The following dict and a function provide a primitive inline styler
# sufficient for our needs.
#
_STYLES = {
'_table': 'border-collapse:collapse;width:100%;',
'_td': 'vertical-align:top;'
'font-family:Helvetica,Arial,sans-serif;font-size:9pt;',
'unchanged': 'color:blue;',
'changed': 'color:green',
'failed': 'color:red;',
'first': 'border-top:0;border-left:1px solid #9e9e9e;',
'first_first': 'border-top:0;border-left:0;',
'notfirst_first': 'border-left:0;border-top:1px solid #9e9e9e;',
'other': 'border-top:1px solid #9e9e9e;border-left:1px solid #9e9e9e;',
'name': 'width:70pt;',
'container': 'padding:0;'
}
def _lookup_style(element, names):
'''
Lookup style by either element name or the list of classes
'''
return _STYLES.get('_'+element, '') + \
''.join([_STYLES.get(name, '') for name in names])
def _generate_html_table(data, out, level=0, extra_style=''):
    '''
    Generate a single table of data.

    *data* is a list whose entries become table rows: a one-entry dict is a
    name/value row (recursing into a nested table when the value is a
    list), a plain list becomes a nested table spanning the row, and any
    other value is rendered as an escaped cell. *level* tracks nesting
    depth for style lookups; *extra_style* carries a '__style__' tag
    (failed/changed/unchanged) down from the enclosing row.
    '''
    print('<table style="{0}">'.format(
        _lookup_style('table', ['table' + six.text_type(level)])), file=out)
    firstone = True
    row_style = 'row' + six.text_type(level)
    cell_style = 'cell' + six.text_type(level)
    for subdata in data:
        # First row gets different border styling than subsequent rows.
        first_style = 'first_first' if firstone else 'notfirst_first'
        second_style = 'first' if firstone else 'other'
        if isinstance(subdata, dict):
            if '__style__' in subdata:
                # Consume the style marker so it is not rendered as data.
                # NOTE(review): this mutates the caller's dict in place.
                new_extra_style = subdata['__style__']
                del subdata['__style__']
            else:
                new_extra_style = extra_style
            if len(subdata) == 1:
                # Single-entry dict: render as a name/value row.
                name, value = next(six.iteritems(subdata))
                print('<tr style="{0}">'.format(
                    _lookup_style('tr', [row_style])
                ), file=out)
                print('<td style="{0}">{1}</td>'.format(
                    _lookup_style(
                        'td',
                        [cell_style, first_style, 'name', new_extra_style]
                    ),
                    name
                ), file=out)
                if isinstance(value, list):
                    # List value: recurse into a nested table.
                    print('<td style="{0}">'.format(
                        _lookup_style(
                            'td',
                            [
                                cell_style,
                                second_style,
                                'container',
                                new_extra_style
                            ]
                        )
                    ), file=out)
                    _generate_html_table(
                        value,
                        out,
                        level + 1,
                        new_extra_style
                    )
                    print('</td>', file=out)
                else:
                    # Scalar value: escape and render inline.
                    # NOTE(review): cgi.escape was removed in Python 3.8;
                    # html.escape(..., quote=False) is the modern
                    # equivalent -- confirm target interpreter.
                    print('<td style="{0}">{1}</td>'.format(
                        _lookup_style(
                            'td',
                            [
                                cell_style,
                                second_style,
                                'value',
                                new_extra_style
                            ]
                        ),
                        cgi.escape(six.text_type(value))
                    ), file=out)
                print('</tr>', file=out)
        elif isinstance(subdata, list):
            # Bare list: nested table occupying the whole row.
            print('<tr style="{0}">'.format(
                _lookup_style('tr', [row_style])
            ), file=out)
            print('<td style="{0}">'.format(
                _lookup_style(
                    'td',
                    [cell_style, first_style, 'container', extra_style]
                )
            ), file=out)
            _generate_html_table(subdata, out, level + 1, extra_style)
            print('</td>', file=out)
            print('</tr>', file=out)
        else:
            # Plain scalar entry: one escaped value cell.
            print('<tr style="{0}">'.format(
                _lookup_style('tr', [row_style])
            ), file=out)
            print('<td style="{0}">{1}</td>'.format(
                _lookup_style(
                    'td',
                    [cell_style, first_style, 'value', extra_style]
                ),
                cgi.escape(six.text_type(subdata))
            ), file=out)
            print('</tr>', file=out)
        firstone = False
    print('</table>', file=out)
def _generate_html(data, out):
    '''
    Wrap the report *data* in a minimal HTML document written to *out*.
    '''
    out.write('<html>\n')
    out.write('<body>\n')
    _generate_html_table(data, out, 0)
    out.write('</body>\n')
    out.write('</html>\n')
def _dict_to_name_value(data):
'''
Convert a dictionary to a list of dictionaries to facilitate ordering
'''
if isinstance(data, dict):
sorted_data = sorted(data.items(), key=lambda s: s[0])
result = []
for name, value in sorted_data:
if isinstance(value, dict):
result.append({name: _dict_to_name_value(value)})
else:
result.append({name: value})
else:
result = data
return result
def _generate_states_report(sorted_data):
'''
Generate states report
'''
states = []
for state, data in sorted_data:
module, stateid, name, function = state.split('_|-')
module_function = '.'.join((module, function))
result = data.get('result', '')
single = [
{'function': module_function},
{'name': name},
{'result': result},
{'duration': data.get('duration', 0.0)},
{'comment': data.get('comment', '')}
]
if not result:
style = 'failed'
else:
changes = data.get('changes', {})
if changes and isinstance(changes, dict):
single.append({'changes': _dict_to_name_value(changes)})
style = 'changed'
else:
style = 'unchanged'
started = data.get('start_time', '')
if started:
single.append({'started': started})
states.append({stateid: single, '__style__': style})
return states
def _generate_report(ret, setup):
    '''
    Generate report dictionary.

    Returns a ``(report, failed)`` tuple: *report* is a list of sections
    (optionally 'extra', then 'stats', 'job', 'states') ready for
    rendering, or an empty list when the configured report_* flags say
    nothing should be reported; *failed* is the failed-state count.
    '''
    retdata = ret.get('return', {})
    # Order states by their execution order, not dict order.
    sorted_data = sorted(
        retdata.items(),
        key=lambda s: s[1].get('__run_num__', 0)
    )
    total = 0
    failed = 0
    changed = 0
    duration = 0.0
    # gather stats
    for _, data in sorted_data:
        if not data.get('result', True):
            failed += 1
        total += 1
        try:
            duration += float(data.get('duration', 0.0))
        except ValueError:
            # Non-numeric duration strings are simply skipped.
            pass
        if data.get('changes', {}):
            changed += 1
    unchanged = total - failed - changed
    log.debug('highstate total: %s', total)
    log.debug('highstate failed: %s', failed)
    log.debug('highstate unchanged: %s', unchanged)
    log.debug('highstate changed: %s', changed)
    # generate report if required by the report_everything/changes/failures
    # flags (everything wins; otherwise only on changes or failures).
    if setup.get('report_everything', False) or \
            (setup.get('report_changes', True) and changed != 0) or \
            (setup.get('report_failures', True) and failed != 0):
        report = [
            {'stats': [
                {'total': total},
                {'failed': failed, '__style__': 'failed'},
                {'unchanged': unchanged, '__style__': 'unchanged'},
                {'changed': changed, '__style__': 'changed'},
                {'duration': duration}
            ]},
            {'job': [
                {'function': ret.get('fun', '')},
                {'arguments': ret.get('fun_args', '')},
                {'jid': ret.get('jid', '')},
                {'success': ret.get('success', True)},
                {'retcode': ret.get('retcode', 0)}
            ]},
            {'states': _generate_states_report(sorted_data)}
        ]
        # Optionally run the configured salt function and prepend its
        # output as the 'extra' section.
        if failed:
            function = setup.get('failure_function', None)
        else:
            function = setup.get('success_function', None)
        if function:
            func_result = __salt__[function]()
            report.insert(
                0,
                {'extra': [{function: _dict_to_name_value(func_result)}]}
            )
    else:
        report = []
    return report, failed
def _sprinkle(config_str):
'''
Sprinkle with grains of salt, that is
convert 'test {id} test {host} ' types of strings
'''
parts = [x for sub in config_str.split('{') for x in sub.split('}')]
for i in range(1, len(parts), 2):
parts[i] = six.text_type(__grains__.get(parts[i], ''))
return ''.join(parts)
def _produce_output(report, failed, setup):
    '''
    Produce output from the report dictionary generated by _generate_report.

    Serializes *report* as json/yaml/html per 'report_format', then either
    writes it to a file or emails it over SMTP per 'report_delivery'.
    '''
    report_format = setup.get('report_format', 'yaml')
    log.debug('highstate output format: %s', report_format)
    if report_format == 'json':
        report_text = salt.utils.json.dumps(report)
    elif report_format == 'yaml':
        string_file = StringIO()
        salt.utils.yaml.safe_dump(report, string_file, default_flow_style=False)
        string_file.seek(0)
        report_text = string_file.read()
    else:
        # Any other value (normally 'html') goes through the HTML renderer.
        string_file = StringIO()
        _generate_html(report, string_file)
        string_file.seek(0)
        report_text = string_file.read()
    report_delivery = setup.get('report_delivery', 'file')
    log.debug('highstate report_delivery: %s', report_delivery)
    if report_delivery == 'file':
        # The output path may contain {grain} placeholders.
        output_file = _sprinkle(setup.get('file_output', '/tmp/test.rpt'))
        with salt.utils.files.fopen(output_file, 'w') as out:
            out.write(salt.utils.stringutils.to_str(report_text))
    else:
        msg = MIMEText(report_text, report_format)
        sender = setup.get('smtp_sender', '')
        recipients = setup.get('smtp_recipients', '')
        # Subject depends on overall success and supports {grain} substitution.
        if failed:
            subject = setup.get('smtp_failure_subject', 'Installation failure')
        else:
            subject = setup.get('smtp_success_subject', 'Installation success')
        subject = _sprinkle(subject)
        msg['Subject'] = subject
        msg['From'] = sender
        msg['To'] = recipients
        smtp = smtplib.SMTP(host=setup.get('smtp_server', ''))
        smtp.sendmail(
            sender,
            [x.strip() for x in recipients.split(',')], msg.as_string())
        smtp.quit()
def returner(ret):
'''
Check highstate return information and possibly fire off an email
or save a file.
'''
setup = | |
Returns the metadata for the newly created file and a boolean indicating
whether the moved entity is completely new (``True``) or overwrote a previously-existing
file (``False``).
:param dest_provider: ( :class:`.BaseProvider` ) a provider instance for the destination
:param src_path: ( :class:`.WaterButlerPath` ) the Path of the entity being moved
:param dest_path: ( :class:`.WaterButlerPath` ) the Path of the destination being moved to
:rtype: (:class:`.BaseFileMetadata`, :class:`bool`)
"""
data, created = await self.intra_copy(dest_provider, src_path, dest_path)
await self.delete(src_path)
return data, created
async def exists(self, path: wb_path.WaterButlerPath, **kwargs) \
        -> typing.Union[bool, wb_metadata.BaseMetadata, typing.List[wb_metadata.BaseMetadata]]:
    """Probe the provider for ``path`` and report whether it exists.

    Existence is determined by asking the provider for metadata.  A
    successful lookup returns that metadata verbatim — note that an empty
    folder yields ``[]``, which is falsy but still means "exists".  A
    not-found condition is translated into ``False`` rather than an
    exception.

    :param path: ( :class:`.WaterButlerPath` ) path to check for
    :rtype: (`self.metadata()` or False)
    """
    try:
        found = await self.metadata(path, **kwargs)
    except exceptions.NotFoundError:
        return False
    except exceptions.MetadataError as exc:
        # A 404 metadata error is just another flavor of "not found";
        # anything else is a genuine failure and must propagate.
        if exc.code == 404:
            return False
        raise
    return found
async def handle_name_conflict(self,
                               path: wb_path.WaterButlerPath,
                               conflict: str='replace',
                               **kwargs) -> typing.Tuple[wb_path.WaterButlerPath, bool]:
    """Resolve a potential naming conflict for ``path``.

    Behavior by ``conflict`` value:

    * ``'replace'`` — return ``path`` as-is (overwriting is acceptable).
    * ``'warn'``    — raise :class:`.NamingConflict` if ``path`` exists.
    * otherwise (``'keep'``) — increment the name until an unused one is
      found, then return it.

    :param path: ( :class:`.WaterButlerPath` ) Desired path to check for conflict
    :param conflict: ( :class:`str` ) replace, keep, warn
    :rtype: (:class:`.WaterButlerPath` or False)
    :raises: :class:`.NamingConflict`
    """
    found = await self.exists(path, **kwargs)
    # ``found`` may be metadata, ``[]`` (an existing empty folder), or
    # False.  ``[]`` is falsy but still counts as "exists".
    if conflict == 'replace' or (not found and found != []):
        return path, found  # type: ignore
    if conflict == 'warn':
        raise exceptions.NamingConflict(path.name)
    while True:
        path.increment_name()
        candidate = await self.revalidate_path(
            path.parent,
            path.name,
            folder=path.is_dir
        )
        found = await self.exists(candidate, **kwargs)
        if not found and found != []:
            break
    return path, False
async def revalidate_path(self,
                          base: wb_path.WaterButlerPath,
                          path: str,
                          folder: bool=False) -> wb_path.WaterButlerPath:
    """Build a WaterButlerPath representing ``/base/path``.

    The default implementation simply appends ``path`` as a child of
    ``base``; id-based providers override this to look up the id of the
    new child object.

    :param base: ( :class:`.WaterButlerPath` ) The base folder to look under
    :param path: ( :class:`str`) the path of a child of `base`, relative to `base`
    :param folder: ( :class:`bool` ) whether the returned WaterButlerPath should be a folder
    :rtype: :class:`.WaterButlerPath`
    """
    child_path = base.child(path, folder=folder)
    return child_path
async def zip(self, path: wb_path.WaterButlerPath, **kwargs) -> asyncio.StreamReader:
    """Stream a Zip archive of the entity at ``path``.

    A single file is wrapped in a one-element listing and archived from
    its parent folder so the resulting zip layout is uniform.

    :param path: ( :class:`.WaterButlerPath` ) The folder to compress
    """
    listing = await self.metadata(path)  # type: ignore
    if path.is_file:
        # Archive a lone file as the sole entry of its parent folder.
        listing = [listing]  # type: ignore
        path = path.parent
    generator = ZipStreamGenerator(self, path, *listing)
    return streams.ZipStreamReader(generator)  # type: ignore
def shares_storage_root(self, other: 'BaseProvider') -> bool:
    """Report whether ``self`` and ``other`` point at the same storage root.

    Used to detect when a file move/copy action might result in the file
    overwriting itself.  Provider name plus settings is usually uniquely
    identifying; providers with less distinctive settings should override
    this to do further detection.

    :param other: ( :class:`.BaseProvider`) another provider instance to compare with
    :rtype: :class:`bool` (True if both providers use the same storage root)
    """
    same_provider = self.NAME == other.NAME
    return same_provider and self.settings == other.settings
@abc.abstractmethod
def can_duplicate_names(self) -> bool:
    """Returns True if a file and a folder in the same directory can have identical names.

    Abstract: concrete providers must implement this.
    """
    raise NotImplementedError
@abc.abstractmethod
async def download(self, src_path: wb_path.WaterButlerPath, **kwargs) \
        -> streams.ResponseStreamReader:
    r"""Download a file from this provider.

    Abstract: concrete providers must implement this.

    :param src_path: ( :class:`.WaterButlerPath` ) Path to the file to be downloaded
    :param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
    :rtype: :class:`.ResponseStreamReader`
    :raises: :class:`.DownloadError`
    """
    raise NotImplementedError
@abc.abstractmethod
async def upload(self, stream: streams.BaseStream, path: wb_path.WaterButlerPath, *args,
                 **kwargs) -> typing.Tuple[wb_metadata.BaseFileMetadata, bool]:
    r"""Uploads the given stream to the provider. Returns the metadata for the newly created
    file and a boolean indicating whether the file is completely new (``True``) or overwrote
    a previously-existing file (``False``)

    Abstract: concrete providers must implement this.

    :param path: ( :class:`.WaterButlerPath` ) Where to upload the file to
    :param stream: ( :class:`.BaseStream` ) The content to be uploaded
    :param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
    :rtype: (:class:`.BaseFileMetadata`, :class:`bool`)
    :raises: :class:`.UploadError`
    """
    # NOTE(review): the docstring previously said ``DeleteError`` — almost
    # certainly copy/paste from delete(); confirm UploadError exists in the
    # exceptions module.
    raise NotImplementedError
@abc.abstractmethod
async def delete(self, src_path: wb_path.WaterButlerPath, **kwargs) -> None:
    r"""Delete the entity at ``src_path`` from this provider.

    Abstract: concrete providers must implement this.

    :param src_path: ( :class:`.WaterButlerPath` ) Path to be deleted
    :param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
    :rtype: :class:`None`
    :raises: :class:`.DeleteError`
    """
    raise NotImplementedError
@abc.abstractmethod
async def metadata(self, path: wb_path.WaterButlerPath, **kwargs) \
        -> typing.Union[wb_metadata.BaseMetadata, typing.List[wb_metadata.BaseMetadata]]:
    r"""Get metadata about the specified resource from this provider. Will be a :class:`list`
    if the resource is a directory otherwise an instance of
    :class:`.BaseFileMetadata`

    Abstract: concrete providers must implement this.

    .. note::
        Mypy doesn't seem to do very well with functions that can return more than one type of
        thing. See: https://github.com/python/mypy/issues/1693

    :param path: ( :class:`.WaterButlerPath` ) The path to a file or folder
    :param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
    :rtype: :class:`.BaseMetadata`
    :rtype: :class:`list` of :class:`.BaseMetadata`
    :raises: :class:`.MetadataError`
    """
    raise NotImplementedError
@abc.abstractmethod
async def validate_v1_path(self, path: str, **kwargs) -> wb_path.WaterButlerPath:
    """API v1 requires that requests against folder endpoints always end with a slash, and
    requests against files never end with a slash. This method checks the provider's metadata
    for the given id and throws a 404 Not Found if the implicit and explicit types don't
    match. This method duplicates the logic in the provider's validate_path method, but
    validate_path must currently accommodate v0 AND v1 semantics. After v0's retirement, this
    method can replace validate_path.

    ``path`` is the string in the url after the provider name and refers to the entity to be
    acted on. For v1, this must *always exist*. If it does not, ``validate_v1_path`` should
    return a 404. Creating a new file in v1 is done by making a PUT request against the parent
    folder and specifying the file name as a query parameter. If a user attempts to create a
    file by PUTting to its inferred path, validate_v1_path should reject this request with a
    404.

    Abstract: concrete providers must implement this.

    :param path: ( :class:`str` ) user-supplied path to validate
    :rtype: :class:`.WaterButlerPath`
    :raises: :class:`.NotFoundError`
    """
    raise NotImplementedError
@abc.abstractmethod
async def validate_path(self, path: str, **kwargs) -> wb_path.WaterButlerPath:
    r"""Validates paths passed in via the v0 API. v0 paths are much less strict than v1 paths.
    They may represent things that exist or something that should be created. As such, the goal
    of ``validate_path`` is to split the path into its component parts and attempt to determine
    the ID of each part on the external provider. For instance, if the ``googledrive`` provider
    receives a path of ``/foo/bar/baz.txt``, it will split those into ``/``, ``foo/``, ``bar/``,
    and ``baz.txt``, and query Google Drive for the ID of each. ``validate_path`` then builds a
    WaterButlerPath object with an ID, name tuple for each path part. The last part is
    permitted to not have an ID, since it may represent a file that has not yet been created.
    All other parts should have an ID.

    The WaterButler v0 API is deprecated and will be removed in a future release. At that time
    this method will be obsolete and will be removed from all providers.

    Abstract: concrete providers must implement this.

    :param path: ( :class:`str` ) user-supplied path to validate
    :param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
    :rtype: :class:`.WaterButlerPath`
    :raises: :class:`.NotFoundError`
    """
    raise NotImplementedError
def path_from_metadata(self,
                       parent_path: wb_path.WaterButlerPath,
                       meta_data: wb_metadata.BaseMetadata) -> wb_path.WaterButlerPath:
    """Build the WaterButlerPath for ``meta_data`` as a child of ``parent_path``.

    The child's id is the metadata's path with surrounding slashes
    stripped; its folder-ness mirrors the metadata.

    :param parent_path: ( :class:`.WaterButlerPath` ) path of the containing folder
    :param meta_data: ( :class:`.BaseMetadata` ) metadata of the child entity
    :rtype: :class:`.WaterButlerPath`
    """
    return parent_path.child(meta_data.name, _id=meta_data.path.strip('/'),
                             folder=meta_data.is_folder)
async def revisions(self, path: wb_path.WaterButlerPath, **kwargs):
    """Return a list of :class:`.BaseFileRevisionMetadata` objects representing the revisions
    available for the file at ``path``.

    Default implementation reports no revisions; providers with revision
    support override this.
    """
    return []  # TODO Raise 405 by default h/t @rliebz
async def create_folder(self, path: wb_path.WaterButlerPath,
                        **kwargs) -> wb_metadata.BaseFolderMetadata:
    """Create a folder in the current provider at `path`. Returns a `BaseFolderMetadata` object
    if successful. May throw a 409 Conflict if a directory with the same name already exists.

    Default implementation raises 405: folder creation is opt-in per provider.

    :param path: ( :class:`.WaterButlerPath` ) User-supplied path to create. Must be a directory.
    :rtype: :class:`.BaseFolderMetadata`
    :raises: :class:`.CreateFolderError`
    """
    raise exceptions.ProviderError({'message': 'Folder creation not supported.'}, code=405)
def _build_range_header(self, slice_tup: typing.Tuple[int, int]) -> str:
start, end = slice_tup
return 'bytes={}-{}'.format(
'' if start is None else start,
'' if end | |
#!/usr/bin/env python3
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A script used to manage Google Maven dependencies for Chromium.
For each dependency in `build.gradle`:
- Download the library
- Download the LICENSE
- Generate a README.chromium file
- Generate a GN target in BUILD.gn
- Generate .info files for AAR libraries
- Generate CIPD yaml files describing the packages
- Generate a 'deps' entry in DEPS.
"""
import argparse
import collections
import concurrent.futures
import contextlib
import fnmatch
import logging
import tempfile
import textwrap
import os
import re
import shutil
import subprocess
import zipfile
# Assume this script is stored under third_party/android_deps/
_CHROMIUM_SRC = os.path.normpath(os.path.join(__file__, '..', '..', '..'))
# Default android_deps directory.
_PRIMARY_ANDROID_DEPS_DIR = os.path.join(_CHROMIUM_SRC, 'third_party',
                                         'android_deps')
# Path to additional_readme_paths.json relative to custom 'android_deps' directory.
_ADDITIONAL_README_PATHS = 'additional_readme_paths.json'
# Path to BUILD.gn file from custom 'android_deps' directory.
_BUILD_GN = 'BUILD.gn'
# Path to build.gradle file relative to custom 'android_deps' directory.
_BUILD_GRADLE = 'build.gradle'
# Location of the android_deps libs directory relative to custom 'android_deps' directory.
_LIBS_DIR = 'libs'
# Checked-in tool binaries used by this script.
_GN_PATH = os.path.join(_CHROMIUM_SRC, 'third_party', 'depot_tools', 'gn')
_GRADLEW = os.path.join(_CHROMIUM_SRC, 'third_party', 'gradle_wrapper',
                        'gradlew')
# Git-controlled files needed by, but not updated by this tool.
# Relative to _PRIMARY_ANDROID_DEPS_DIR.
_PRIMARY_ANDROID_DEPS_FILES = [
    'buildSrc',
    'licenses',
    'settings.gradle.template',
    # NOTE(review): 'supressions' spelling must match the checked-in
    # file name — do not "fix" without renaming the file.
    'vulnerability_supressions.xml',
]
# Git-controlled files needed by and updated by this tool.
# Relative to args.android_deps_dir.
_CUSTOM_ANDROID_DEPS_FILES = [
    os.path.join('..', '..', 'DEPS'),
    _BUILD_GN,
    _ADDITIONAL_README_PATHS,
    'subprojects.txt',
]
# If this file exists in an aar file then it is appended to LICENSE
_THIRD_PARTY_LICENSE_FILENAME = 'third_party_licenses.txt'
# Path to the aar.py script used to generate .info files.
_AAR_PY = os.path.join(_CHROMIUM_SRC, 'build', 'android', 'gyp', 'aar.py')
@contextlib.contextmanager
def BuildDir(dirname=None):
    """Context manager yielding a build directory path.

    Args:
      dirname: Optional build directory path. If not provided, a temporary
        directory is created for the duration of the context.
    Yields:
      The directory path. A caller-supplied directory is left untouched on
      exit; an auto-created temporary one is removed.
    """
    created_here = not dirname
    if created_here:
        dirname = tempfile.mkdtemp()
    try:
        yield dirname
    finally:
        if created_here:
            shutil.rmtree(dirname)
def RaiseCommandException(args, returncode, output, error):
    """Raise an Exception whose message describes a command failure.

    Args:
      args: shell command-line (as passed to subprocess.call())
      returncode: status code.
      output: captured standard output, or None.
      error: standard error output, or None.
    Raises:
      a new Exception.
    """
    parts = ['Command failed with status {}: {}\n'.format(returncode, args)]
    if output:
        parts.append('Output:-----------------------------------------\n{}\n'
                     '------------------------------------------------\n'.format(output))
    if error:
        parts.append('Error message: ---------------------------------\n{}\n'
                     '------------------------------------------------\n'.format(error))
    raise Exception(''.join(parts))
def RunCommand(args, print_stdout=False, cwd=None):
    """Run a shell command, raising on failure.

    Runs silently unless ``print_stdout`` is True, in which case the child
    inherits this process's stdout.

    Args:
      args: A string or a list of strings for the shell command.
      print_stdout: If True, let the child write directly to stdout;
        otherwise capture its output.
      cwd: Optional working directory for the command.
    Raises:
      On failure, an Exception containing the command's arguments, return
      status, and captured output.
    """
    logging.debug('Run %s', args)
    capture = not print_stdout
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE if capture else None,
                            cwd=cwd)
    out, _ = proc.communicate()
    if proc.returncode != 0:
        RaiseCommandException(args, proc.returncode, None, out)
def RunCommandAndGetOutput(args):
    """Run a shell command and return its standard output.

    Runs without printing anything.

    Args:
      args: A string or a list of strings for the shell command.
    Returns:
      The command's standard output (bytes).
    Raises:
      On failure, an Exception containing the command's arguments, return
      status, standard output, and standard error as separate messages.
    """
    logging.debug('Run %s', args)
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        RaiseCommandException(args, proc.returncode, out, err)
    return out
def MakeDirectory(dir_path):
    """Create directory |dir_path| recursively if it does not exist yet."""
    if dir_path == '' or os.path.isdir(dir_path):
        # Nothing to do for the empty path or an already-existing directory.
        return
    logging.debug('mkdir [%s]', dir_path)
    os.makedirs(dir_path)
def DeleteDirectory(dir_path):
    """Recursively delete |dir_path| if it exists; otherwise do nothing."""
    if not os.path.exists(dir_path):
        return
    logging.debug('rmdir [%s]', dir_path)
    shutil.rmtree(dir_path)
def Copy(src_dir, src_paths, dst_dir, dst_paths, src_path_must_exist=True):
    """Copies |src_paths| in |src_dir| to |dst_paths| in |dst_dir|.

    Args:
      src_dir: Directory containing |src_paths|.
      src_paths: Files to copy.
      dst_dir: Directory containing |dst_paths|.
      dst_paths: Copy destinations, parallel to |src_paths|.
      src_path_must_exist: If False, do not throw error if the file for one of
        |src_paths| does not exist.
    Raises:
      Exception listing all missing sources, raised after the existing
      sources have been copied.
    """
    assert len(src_paths) == len(dst_paths)
    missing_files = []
    for src_path, dst_path in zip(src_paths, dst_paths):
        abs_src_path = os.path.join(src_dir, src_path)
        abs_dst_path = os.path.join(dst_dir, dst_path)
        if os.path.exists(abs_src_path):
            CopyFileOrDirectory(abs_src_path, abs_dst_path)
        elif src_path_must_exist:
            missing_files.append(src_path)
    if missing_files:
        raise Exception('Missing files from {}: {}'.format(
            src_dir, missing_files))
def CopyFileOrDirectory(src_path, dst_path, ignore_extension=None):
    """Copy file or directory |src_path| into |dst_path| exactly.

    Args:
      src_path: Source path.
      dst_path: Destination path. Parent directories are created as needed.
      ignore_extension: File extension of files not to copy, starting with '.'. If None, all files
        are copied.
    """
    assert not ignore_extension or ignore_extension[0] == '.'
    src_path = os.path.normpath(src_path)
    dst_path = os.path.normpath(dst_path)
    logging.debug('copy [%s -> %s]', src_path, dst_path)
    MakeDirectory(os.path.dirname(dst_path))
    if os.path.isdir(src_path):
        # Copy directory recursively, replacing any pre-existing destination.
        DeleteDirectory(dst_path)
        ignore = None
        if ignore_extension:
            ignore = shutil.ignore_patterns('*' + ignore_extension)
        shutil.copytree(src_path, dst_path, ignore=ignore)
    elif not ignore_extension or not src_path.endswith(ignore_extension):
        # Exact suffix check. The previous version used an unanchored,
        # unescaped, non-raw regex ('.*\.' + ext) that could wrongly skip
        # files whose extension merely *starts with* |ignore_extension|
        # (e.g. '.info2' when ignoring '.info') and triggered an
        # invalid-escape-sequence warning.
        shutil.copy(src_path, dst_path)
def ReadFile(file_path):
    """Return the entire content of |file_path| as a string."""
    with open(file_path) as f:
        content = f.read()
    return content
def ReadFileAsLines(file_path):
    """Return the content of |file_path| as a list of lines (newlines kept)."""
    with open(file_path) as f:
        return list(f)
def WriteFile(file_path, file_data):
    """Write |file_data| (str or bytes) to |file_path|, creating parent dirs."""
    data = file_data.encode('utf8') if isinstance(file_data, str) else file_data
    MakeDirectory(os.path.dirname(file_path))
    with open(file_path, 'wb') as f:
        f.write(data)
def FindInDirectory(directory, filename_filter):
    """Return paths of all files under |directory| matching |filename_filter|."""
    matches = []
    for root, _dirnames, filenames in os.walk(directory):
        matches.extend(
            os.path.join(root, name)
            for name in fnmatch.filter(filenames, filename_filter))
    return matches
# Named tuple describing a CIPD package.
# - path: Path to cipd.yaml file.
# - name: cipd package name.
# - tag: cipd tag.
CipdPackageInfo = collections.namedtuple('CipdPackageInfo',
['path', 'name', 'tag'])
# Regular expressions used to extract useful info from cipd.yaml files
# generated by Gradle. See BuildConfigGenerator.groovy:makeCipdYaml()
_RE_CIPD_CREATE = re.compile('cipd create --pkg-def cipd.yaml -tag (\S*)')
_RE_CIPD_PACKAGE = re.compile('package: (\S*)')
def _ParseSubprojects(subproject_path):
"""Parses listing of subproject build.gradle files. Returns list of paths."""
if not os.path.exists(subproject_path):
return None
subprojects = []
for subproject in open(subproject_path):
subproject = subproject.strip()
if subproject and not subproject.startswith('#'):
subprojects.append(subproject)
return subprojects
def _GenerateSettingsGradle(subproject_dirs, settings_template_path,
settings_out_path):
"""Generates settings file by replacing "{{subproject_dirs}}" string in template.
Args:
subproject_dirs: List of subproject directories to substitute into template.
settings_template_path: Path of template file to substitute into.
settings_out_path: Path of output settings.gradle file.
"""
with open(settings_template_path) as f:
template_content = f.read()
subproject_dirs_str = ''
if subproject_dirs:
subproject_dirs_str = '\'' + '\',\''.join(subproject_dirs) + '\''
template_content = template_content.replace('{{subproject_dirs}}',
subproject_dirs_str)
with open(settings_out_path, 'w') as f:
f.write(template_content)
def _BuildGradleCmd(build_android_deps_dir, task):
    """Return the gradlew command line for running |task|.

    When the working directory contains a generated settings.gradle, it is
    passed explicitly via -c.
    """
    cmd = [
        _GRADLEW, '-b',
        os.path.join(build_android_deps_dir, _BUILD_GRADLE), '--stacktrace',
        task
    ]
    settings_path = os.path.join(build_android_deps_dir, 'settings.gradle')
    if os.path.exists(settings_path):
        cmd.extend(['-c', os.path.abspath(settings_path)])
    return cmd
def _CheckVulnerabilities(build_android_deps_dir, report_dst):
    """Run Gradle's dependencyCheckAnalyze task and archive its report.

    Args:
      build_android_deps_dir: Directory containing the build.gradle to check.
      report_dst: Directory receiving the generated reports; replaced if it
        already exists.
    Raises:
      subprocess.CalledProcessError when the analysis finds a vulnerability
      (after logging remediation instructions).
    """
    logging.warning('Running Gradle dependencyCheckAnalyze. This may take a '
                    'few minutes the first time.')
    # Separate command from main gradle command so that we can provide specific
    # diagnostics in case of failure of this step.
    gradle_cmd = _BuildGradleCmd(build_android_deps_dir,
                                 'dependencyCheckAnalyze')
    report_src = os.path.join(build_android_deps_dir, 'build', 'reports')
    if os.path.exists(report_dst):
        shutil.rmtree(report_dst)
    try:
        logging.info('CMD: %s', ' '.join(gradle_cmd))
        subprocess.run(gradle_cmd, check=True)
    except subprocess.CalledProcessError:
        report_path = os.path.join(report_dst, 'dependency-check-report.html')
        # Message typo fixed: 'avialable' -> 'available'.
        logging.error(
            textwrap.dedent("""
            =============================================================================
            A package has a known vulnerability. It may not be in a package or packages
            which you just added, but you need to resolve the problem before proceeding.
            If you can't easily fix it by rolling the package to a fixed version now,
            please file a crbug of type= Bug-Security providing all relevant information,
            and then rerun this command with --ignore-vulnerabilities.
            The html version of the report is available at: {}
            =============================================================================
            """.format(report_path)))
        raise
    finally:
        # Preserve whatever report was produced, even on failure.
        if os.path.exists(report_src):
            CopyFileOrDirectory(report_src, report_dst)
def GetCipdPackageInfo(cipd_yaml_path):
    """Extract the CIPD package name and tag from a cipd.yaml file.

    Args:
      cipd_yaml_path: Path of input cipd.yaml file.
    Returns:
      A (package_name, package_tag) tuple.
    Raises:
      Exception if either field cannot be found in the file.
    """
    name = None
    tag = None
    for line in ReadFileAsLines(cipd_yaml_path):
        pkg_match = _RE_CIPD_PACKAGE.match(line)
        if pkg_match:
            name = pkg_match.group(1)
        tag_match = _RE_CIPD_CREATE.search(line)
        if tag_match:
            tag = tag_match.group(1)
    if not (name and tag):
        raise Exception('Invalid cipd.yaml format: ' + cipd_yaml_path)
    return (name, tag)
def ParseDeps(root_dir, libs_dir):
"""Parse an android_deps/libs and retrieve package information.
Args:
root_dir: Path to a root Chromium or build directory.
Returns:
A directory mapping package names to tuples of
(cipd_yaml_file, package_name, package_tag), where |cipd_yaml_file|
is the path to the cipd.yaml file, related to |libs_dir|,
and |package_name| and |package_tag| are the extracted from it.
"""
result = {}
root_dir = os.path.abspath(root_dir)
libs_dir = os.path.abspath(os.path.join(root_dir, libs_dir))
for cipd_file in FindInDirectory(libs_dir, 'cipd.yaml'):
pkg_name, pkg_tag = GetCipdPackageInfo(cipd_file)
cipd_path = | |
run in range(nruns):
# print(run)
# Copy the full graph
G_full = copy.deepcopy(G_full_orig)
G_full_cycles = nx.cycle_basis(G_full)
# Randomly remove edges from cycles until no more cycles remain
while len(G_full_cycles)>0:
next_cycle = G_full_cycles[0]
complete_next_cycle = tuple(next_cycle) + (next_cycle[0],)
complete_next_cycle_edges = []
for i in range(len(complete_next_cycle)-1):
possible_e = (complete_next_cycle[i], complete_next_cycle[i+1])
if possible_e in edges:
complete_next_cycle_edges.append(possible_e)
elif (possible_e[1], possible_e[0]) in edges:
complete_next_cycle_edges.append((possible_e[1], possible_e[0]))
else:
raise ValueError('find_max_value: edge found in cycle that is not found in edge list')
edge_order = np.random.permutation(len(complete_next_cycle_edges))
remove_e_candidates = [complete_next_cycle_edges[i] for i in edge_order]
removed = False
i = 0
while not removed and i < len(remove_e_candidates):
remove_e_candidate = remove_e_candidates[i]
if remove_e_candidate in G_full.edges:
G_full.remove_edge(remove_e_candidate[0], remove_e_candidate[1])
removed = True
else:
i += 1
G_full_cycles = nx.cycle_basis(G_full)
assert len(G_full_cycles) == 0, 'There are still cycles in the graph'
# For any edge that is present in G_full, evaluate G_full with and without e
max_values = evaluate_removing_edges_from_spanning_tree(G_full, vertices, edges, demand, max_values)
assert sum([max_values[k] >= 0 for k in edges]) == len(edges), 'find_max_values: some edges were not evaluated in the sampled random spanning trees'
print('Finding max values with %d sampled spanning trees took %0.2f seconds'%(nruns, time.time() - master_timer))
return max_values
def budget_pcsf_semigrad_ascent(G, values, max_values, weights,
capacity, demand,
init_method='empty', mlb_selection='alternating', mlb_max_method='kr-avoid-cycles', fix_cycles_method = 'greedy', fix_cycles_obj = 'budget',
instancestring='', logfiledir='', resultfiledir='', verbose=True):
'''Semigradient-based supermodular function maximization subject to a combination of a knapsack and a matroid constraint.
Inputs:
G - list of edges that are candidates for selection
values - dict containing the modular value of each edge
weights - dict containing the cost of each edge
capacity - budget for selecting edges
demand - demand between pairs of vertices
init_method - whether to start with 'empty', 'full', or 'greedy' set
mlb_max_method - 'knapsackmip_repair' or 'greedy'
Returns:
selected_items - set of selected edges
selected_items_weight - total cost of selected edges
selected_items_mlb_value - total modular value of selected edges
selected_items_true_value - true value of selected edges
'''
solverunid = np.random.randint(10000)
logfilename = logfiledir+instancestring+'_'+str(solverunid)+'.log'
sys.stdout = open(logfilename, 'w')
V = [v.index for v in G.vs()]
# num_vertices = len(V)
E = G.get_edgelist()
E_candidates = copy.deepcopy(E)
timer = time.time()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# INITIALIZE SOLUTION
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if verbose:
print('Initializing with %s set.'%init_method)
if init_method is 'empty':
selected_items_current = set()
elif init_method is 'full':
selected_items_current = set(E_candidates)
elif init_method is 'random':
edge_order = np.random.permutation(len(E_candidates))
random_edges = []
k = 0
while (sum([weights[i] for i in random_edges])) < capacity:
random_edges.append(E_candidates[edge_order[k]])
k+= 1
selected_items_current = set(random_edges)
elif init_method is 'greedy':
# # if greedy solution was not already computed
# selected_items_current = budget_pcsf_greedy(G, weights, demand, capacity, by_ratio=True, instancestring='', logfiledir='', resultfiledir='', verbose=False)[0]
# selected_items_current = set(selected_items_current)
# if greedy solution has already been computed, load from result
selected_items_current = set()
greedyrdir = '../notebooks/synthetic_graphs/greedy_results_2/'
#greedyrfile = ['_'.join((f.split('.csv')[0]).split('_')[:-1]) for f in os.listdir(greedyrdir) if os.path.isfile(os.path.join(greedyrdir,f)) and f[0]=='G']
greedyrfile = [f for f in os.listdir(greedyrdir) if os.path.isfile(os.path.join(greedyrdir,f)) and f[0]=='G' and ('_'.join((f.split('.csv')[0]).split('_')[:-1]) == instancestring)][0]
with open(greedyrdir+greedyrfile,'r') as gr:
for line in gr:
if 'x_e' in line:
pass
else:
edge_ij = line.strip()[:-1]
edge_x = line.strip()[-1]
if edge_x == 1:
selected_items_current.union(eval(edge_ij))
f_selected_items_current = evaluate_solution(selected_items_current, V, demand)[0]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SEMIGRADIENT MLB MAXIMIZATION
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
converged = False
iteration = 0
if mlb_selection == 'alternating':
mlb1_converged = False
mlb2_converged = False
current_mlb = 'mlb1' # start with MLB1
while not converged:
if verbose:
print('--------Semigradient Ascent Iteration %d --------'%iteration)
# pick a subdifferential-based semigradient
if current_mlb == 'mlb1': # use MLB1
# compute MLB1 coefficients based on semigradient at current solution
mlb_coeff = dict.fromkeys(values.keys())
selected_items_count = 0
non_selected_items_count = 0
for e in E_candidates:
if e not in selected_items_current:
non_selected_items_count += 1
selected_items_temp_copy = copy.deepcopy(selected_items_current)
G_add_e = evaluate_solution(selected_items_temp_copy.union([e]), V, demand)[1]
G_add_e_cycles = nx.cycle_basis(G_add_e)
# if adding the edge would create a cycle, set its mlb coefficient to 0
if len(G_add_e_cycles)>0:
mlb_coeff[e] = 0
else:
mlb_coeff[e] = values[e]
# just set mlb coeff straight to f(j|empty)
#mlb_coeff[e] = values[e]
else:
selected_items_count += 1
selected_items_temp_copy = copy.deepcopy(selected_items_current)
f_adde_selected_items_minuse = f_selected_items_current - evaluate_solution(selected_items_temp_copy.difference([e]), V, demand)[0]
mlb_coeff[e] = f_adde_selected_items_minuse
assert selected_items_count+non_selected_items_count == len(E_candidates), 'budget_pcsf_semigrad_ascent: during mlb_coeff computation, edges in solution and not in solution do not add up to |E|'
elif current_mlb == 'mlb2': # use MLB2
# compute MLB1 coefficients based on semigradient at current solution
mlb_coeff = dict.fromkeys(values.keys())
selected_items_count = 0
non_selected_items_count = 0
for e in E_candidates:
if e not in selected_items_current:
non_selected_items_count += 1
selected_items_temp_copy = copy.deepcopy(selected_items_current)
f_selected_items_adde, G_add_e = evaluate_solution(selected_items_temp_copy.union([e]), V, demand)
G_add_e_cycles = nx.cycle_basis(G_add_e)
# if adding the edge would create a cycle, set its mlb coefficient to 0
if len(G_add_e_cycles)>0:
mlb_coeff[e] = 0
else:
mlb_coeff[e] = f_selected_items_adde - f_selected_items_current
# just set mlb coeff straight to f(j|S)
#mlb_coeff[e] = f_selected_items_adde - f_selected_items_current
# j|S seems problematic in terms of MLB, try j|empty
#mlb_coeff[e] = values[e]
else:
selected_items_count += 1
mlb_coeff[e] = max_values[e]
assert selected_items_count+non_selected_items_count == len(E_candidates), 'budget_pcsf_semigrad_ascent: during mlb_coeff computation, edges in solution and not in solution do not add up to |E|'
## DEBUG
# print('current MLB coeffs:')
# print(mlb_coeff)
# maximize the MLB based on the selected semigradient
if mlb_max_method == 'greedy':
selected_items_new, selected_items_new_weight, selected_items_new_mlb_value = budget_pcsf_mlb_greedy(E_candidates, mlb_coeff, weights, capacity, V, demand, ratio=True, instancestring = '', logfiledir = '', resultfiledir = '', appendlog=logfilename, verbose = verbose) ### add demand arg
elif mlb_max_method == 'kr':
selected_items_new, selected_items_new_weight, selected_items_new_mlb_value = budget_pcsf_mlb_knapsack_repair(E_candidates, mlb_coeff, weights, capacity, V, demand, knapsack_avoid_cycles = False, fix_cycles_method = fix_cycles_method, fix_cycles_obj = fix_cycles_obj, instancestring = '', logfiledir = '', resultfiledir = '', appendlog=logfilename, verbose = verbose)
elif mlb_max_method == 'kr-avoid-cycles':
selected_items_new, selected_items_new_weight, selected_items_new_mlb_value = budget_pcsf_mlb_knapsack_repair(E_candidates, mlb_coeff, weights, capacity, V, demand, knapsack_avoid_cycles = True, fix_cycles_method = fix_cycles_method, fix_cycles_obj = fix_cycles_obj, instancestring = '', logfiledir = '', resultfiledir = '', appendlog=logfilename, verbose = verbose)
else:
raise ValueError('budget_pcsf_semigrad_ascent: invalid mlb_max_method provided')
## DEBUG
# check the bound inequality holds
# print('current selected items:')
# print(selected_items_current)
# print('new selected items:')
# print(selected_items_new)
#print('current selected items true value:')
#print(f_selected_items_current)
#print('current selected items MLB value:')
#print(sum([mlb_coeff[i] for i in selected_items_current]))
#print('new selected items MLB value:')
#print(selected_items_new_mlb_value)
# print('new selected items MLB partial value:')
LHS = f_selected_items_current + selected_items_new_mlb_value - sum([mlb_coeff[i] for i in selected_items_current])
f_selected_items_new = evaluate_solution(selected_items_new, V, demand)[0]
if verbose:
print(f_selected_items_current, LHS, f_selected_items_new)
if LHS < 0:
print('MLB IS NEGATIVE')
else:
print('MLB IS NON-NEGATIVE!!!!!!!!!!!')
#assert LHS <= f_selected_items_new, 'budget_pcsf_semigrad_ascent: after MLB maximization, MLB inequality violated--check bound computation'
# check the results are within budget
assert selected_items_new_weight <= capacity + 0.0000000001, 'budget_pcsf_semigrad_ascent: after MLB maximization, selected items exceed weight capacity'
# check for convergence
if selected_items_new == selected_items_current:
if current_mlb == 'mlb1':
mlb1_converged = True
if verbose:
print('MLB1 converged')
current_mlb = 'mlb2' # switch to mlb2
selected_items_current = selected_items_new
f_selected_items_current = f_selected_items_new
iteration += 1
elif current_mlb == 'mlb2':
mlb2_converged = True
if verbose:
print('MLB2 converged')
current_mlb = 'mlb1' # switch to mlb1
selected_items_current = selected_items_new
f_selected_items_current = f_selected_items_new
iteration += 1
if mlb1_converged and mlb2_converged:
converged = True
else:
if verbose:
print('Convergence broken')
selected_items_current = selected_items_new
f_selected_items_current = f_selected_items_new
iteration += 1
mlb1_converged = False
mlb2_converged = False
if iteration > 100:
break
print('Semigradient maximization took %0.2f seconds'%(time.time()-timer))
# selected_items_mlb_value = sum(values[i] for i in selected_items)
selected_items_current_weight = sum([weights[i] for i in selected_items_current])
assert selected_items_current_weight <= capacity + 0.0000000001, 'budget_pcsf_semigrad_ascent: final selected items exceed weight capacity'
| |
def carrier_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "carrier_ip", value)
@property
@pulumi.getter(name="customerOwnedIp")
def customer_owned_ip(self) -> Optional[pulumi.Input[str]]:
"""
Customer owned IP.
"""
return pulumi.get(self, "customer_owned_ip")
    @customer_owned_ip.setter
    def customer_owned_ip(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the customer owned IP."""
        pulumi.set(self, "customer_owned_ip", value)
@property
@pulumi.getter(name="customerOwnedIpv4Pool")
def customer_owned_ipv4_pool(self) -> Optional[pulumi.Input[str]]:
"""
ID of a customer-owned address pool. For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing).
"""
return pulumi.get(self, "customer_owned_ipv4_pool")
    @customer_owned_ipv4_pool.setter
    def customer_owned_ipv4_pool(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the customer-owned address pool ID."""
        pulumi.set(self, "customer_owned_ipv4_pool", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
Indicates if this EIP is for use in VPC (`vpc`) or EC2 Classic (`standard`).
"""
return pulumi.get(self, "domain")
    @domain.setter
    def domain(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the domain (`vpc` or `standard`)."""
        pulumi.set(self, "domain", value)
@property
@pulumi.getter
def instance(self) -> Optional[pulumi.Input[str]]:
"""
EC2 instance ID.
"""
return pulumi.get(self, "instance")
    @instance.setter
    def instance(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the EC2 instance ID."""
        pulumi.set(self, "instance", value)
@property
@pulumi.getter(name="networkBorderGroup")
def network_border_group(self) -> Optional[pulumi.Input[str]]:
"""
Location from which the IP address is advertised. Use this parameter to limit the address to this location.
"""
return pulumi.get(self, "network_border_group")
    @network_border_group.setter
    def network_border_group(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the network border group."""
        pulumi.set(self, "network_border_group", value)
@property
@pulumi.getter(name="networkInterface")
def network_interface(self) -> Optional[pulumi.Input[str]]:
"""
Network interface ID to associate with.
"""
return pulumi.get(self, "network_interface")
    @network_interface.setter
    def network_interface(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the network interface ID."""
        pulumi.set(self, "network_interface", value)
@property
@pulumi.getter(name="privateDns")
def private_dns(self) -> Optional[pulumi.Input[str]]:
"""
The Private DNS associated with the Elastic IP address (if in VPC).
"""
return pulumi.get(self, "private_dns")
    @private_dns.setter
    def private_dns(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the private DNS name."""
        pulumi.set(self, "private_dns", value)
@property
@pulumi.getter(name="privateIp")
def private_ip(self) -> Optional[pulumi.Input[str]]:
"""
Contains the private IP address (if in VPC).
"""
return pulumi.get(self, "private_ip")
    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the private IP address."""
        pulumi.set(self, "private_ip", value)
@property
@pulumi.getter(name="publicDns")
def public_dns(self) -> Optional[pulumi.Input[str]]:
"""
Public DNS associated with the Elastic IP address.
"""
return pulumi.get(self, "public_dns")
    @public_dns.setter
    def public_dns(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the public DNS name."""
        pulumi.set(self, "public_dns", value)
@property
@pulumi.getter(name="publicIp")
def public_ip(self) -> Optional[pulumi.Input[str]]:
"""
Contains the public IP address.
"""
return pulumi.get(self, "public_ip")
    @public_ip.setter
    def public_ip(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the public IP address."""
        pulumi.set(self, "public_ip", value)
@property
@pulumi.getter(name="publicIpv4Pool")
def public_ipv4_pool(self) -> Optional[pulumi.Input[str]]:
"""
EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
"""
return pulumi.get(self, "public_ipv4_pool")
    @public_ipv4_pool.setter
    def public_ipv4_pool(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set the public IPv4 pool identifier."""
        pulumi.set(self, "public_ipv4_pool", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        """Set the tag map."""
        pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        """Set the full tag map."""
        pulumi.set(self, "tags_all", value)
@property
@pulumi.getter
def vpc(self) -> Optional[pulumi.Input[bool]]:
"""
Boolean if the EIP is in a VPC or not.
"""
return pulumi.get(self, "vpc")
    @vpc.setter
    def vpc(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Set whether the EIP is in a VPC."""
        pulumi.set(self, "vpc", value)
class Eip(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address: Optional[pulumi.Input[str]] = None,
associate_with_private_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ipv4_pool: Optional[pulumi.Input[str]] = None,
instance: Optional[pulumi.Input[str]] = None,
network_border_group: Optional[pulumi.Input[str]] = None,
network_interface: Optional[pulumi.Input[str]] = None,
public_ipv4_pool: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides an Elastic IP resource.
> **Note:** EIP may require IGW to exist prior to association. Use `depends_on` to set an explicit dependency on the IGW.
> **Note:** Do not use `network_interface` to associate the EIP to `lb.LoadBalancer` or `ec2.NatGateway` resources. Instead use the `allocation_id` available in those resources to allow AWS to manage the association, otherwise you will see `AuthFailure` errors.
## Example Usage
### Single EIP associated with an instance
```python
import pulumi
import pulumi_aws as aws
lb = aws.ec2.Eip("lb",
instance=aws_instance["web"]["id"],
vpc=True)
```
### Multiple EIPs associated with a single network interface
```python
import pulumi
import pulumi_aws as aws
multi_ip = aws.ec2.NetworkInterface("multi-ip",
subnet_id=aws_subnet["main"]["id"],
private_ips=[
"10.0.0.10",
"10.0.0.11",
])
one = aws.ec2.Eip("one",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.10")
two = aws.ec2.Eip("two",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.11")
```
### Attaching an EIP to an Instance with a pre-assigned private ip (VPC Only)
```python
import pulumi
import pulumi_aws as aws
default = aws.ec2.Vpc("default",
cidr_block="10.0.0.0/16",
enable_dns_hostnames=True)
gw = aws.ec2.InternetGateway("gw", vpc_id=default.id)
tf_test_subnet = aws.ec2.Subnet("tfTestSubnet",
vpc_id=default.id,
cidr_block="10.0.0.0/24",
map_public_ip_on_launch=True,
opts=pulumi.ResourceOptions(depends_on=[gw]))
foo = aws.ec2.Instance("foo",
ami="ami-5189a661",
instance_type="t2.micro",
private_ip="10.0.0.12",
subnet_id=tf_test_subnet.id)
bar = aws.ec2.Eip("bar",
vpc=True,
instance=foo.id,
associate_with_private_ip="10.0.0.12",
opts=pulumi.ResourceOptions(depends_on=[gw]))
```
### Allocating EIP from the BYOIP pool
```python
import pulumi
import pulumi_aws as aws
byoip_ip = aws.ec2.Eip("byoip-ip",
public_ipv4_pool="ipv4pool-ec2-012345",
vpc=True)
```
## Import
EIPs in a VPC can be imported using their Allocation ID, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar eipalloc-00a10e96
```
EIPs in EC2 Classic can be imported using their Public IP, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar 172.16.31.10
```
[1]https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address: IP address from an EC2 BYOIP pool. This option is only available for VPC EIPs.
:param pulumi.Input[str] associate_with_private_ip: User-specified primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.
:param pulumi.Input[str] customer_owned_ipv4_pool: ID of a customer-owned address pool. For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing).
:param pulumi.Input[str] instance: EC2 instance ID.
:param pulumi.Input[str] network_border_group: Location from which the IP address is advertised. Use this parameter to limit the address to this location.
:param pulumi.Input[str] network_interface: Network interface ID to associate with.
:param pulumi.Input[str] public_ipv4_pool: EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
:param pulumi.Input[bool] vpc: Boolean if the EIP is in a VPC or not.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[EipArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Elastic IP resource.
> **Note:** EIP may require IGW to exist prior to association. Use `depends_on` to set an explicit dependency on the IGW.
> **Note:** Do not use `network_interface` to associate the EIP to `lb.LoadBalancer` or `ec2.NatGateway` resources. Instead use the `allocation_id` available in those resources to allow AWS to manage the association, otherwise you will see `AuthFailure` errors.
## Example Usage
### Single EIP associated with an instance
```python
import pulumi
import pulumi_aws as aws
lb = aws.ec2.Eip("lb",
instance=aws_instance["web"]["id"],
vpc=True)
```
### Multiple EIPs associated with a single network interface
```python
import pulumi
import pulumi_aws as aws
multi_ip = aws.ec2.NetworkInterface("multi-ip",
subnet_id=aws_subnet["main"]["id"],
private_ips=[
"10.0.0.10",
"10.0.0.11",
])
one = aws.ec2.Eip("one",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.10")
two = aws.ec2.Eip("two",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.11")
```
### Attaching an EIP to an Instance with a pre-assigned private ip (VPC Only)
```python
import pulumi
import pulumi_aws as aws
default = aws.ec2.Vpc("default",
cidr_block="10.0.0.0/16",
enable_dns_hostnames=True)
gw = aws.ec2.InternetGateway("gw", vpc_id=default.id)
tf_test_subnet = aws.ec2.Subnet("tfTestSubnet",
vpc_id=default.id,
cidr_block="10.0.0.0/24",
map_public_ip_on_launch=True,
opts=pulumi.ResourceOptions(depends_on=[gw]))
foo = aws.ec2.Instance("foo",
ami="ami-5189a661",
instance_type="t2.micro",
private_ip="10.0.0.12",
subnet_id=tf_test_subnet.id)
bar = aws.ec2.Eip("bar",
vpc=True,
instance=foo.id,
associate_with_private_ip="10.0.0.12",
opts=pulumi.ResourceOptions(depends_on=[gw]))
```
### Allocating EIP from the BYOIP pool
```python
import pulumi
import pulumi_aws as aws
byoip_ip = aws.ec2.Eip("byoip-ip",
public_ipv4_pool="ipv4pool-ec2-012345",
vpc=True)
```
## Import
EIPs in a VPC can be imported using their Allocation ID, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar eipalloc-00a10e96
```
EIPs in EC2 Classic can be imported using their Public IP, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar 172.16.31.10
```
[1]https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html
:param str resource_name: The name of the resource.
:param EipArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EipArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address: Optional[pulumi.Input[str]] = None,
associate_with_private_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ipv4_pool: Optional[pulumi.Input[str]] = None,
instance: Optional[pulumi.Input[str]] = None,
network_border_group: Optional[pulumi.Input[str]] = None,
network_interface: Optional[pulumi.Input[str]] = None,
public_ipv4_pool: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EipArgs.__new__(EipArgs)
__props__.__dict__["address"] = address
__props__.__dict__["associate_with_private_ip"] = associate_with_private_ip
__props__.__dict__["customer_owned_ipv4_pool"] = customer_owned_ipv4_pool
__props__.__dict__["instance"] = instance
__props__.__dict__["network_border_group"] = network_border_group
__props__.__dict__["network_interface"] = network_interface
__props__.__dict__["public_ipv4_pool"] = public_ipv4_pool
__props__.__dict__["tags"] = tags
__props__.__dict__["vpc"] = vpc
__props__.__dict__["allocation_id"] = None
__props__.__dict__["association_id"] = | |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.11.3
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class TagsV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_tag(self, owner, body, **kwargs): # noqa: E501
"""Create tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_tag(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Tag body: Tag body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_tag_with_http_info(owner, body, **kwargs) # noqa: E501
    def create_tag_with_http_info(self, owner, body, **kwargs):  # noqa: E501
        """Create tag  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_tag_with_http_info(owner, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param V1Tag body: Tag body (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the named args plus kwargs so validation below can treat
        # every parameter uniformly by name.
        local_var_params = locals()
        all_params = [
            'owner',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # params dict so the required-parameter checks see everything.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_tag" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                       local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `create_tag`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_tag`")  # noqa: E501
        collection_formats = {}
        # 'owner' fills the {owner} template in the URL below.
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/api/v1/orgs/{owner}/tags', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Tag',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_tag(self, owner, name, **kwargs): # noqa: E501
"""Delete tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_tag(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namesapce (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_tag_with_http_info(owner, name, **kwargs) # noqa: E501
    def delete_tag_with_http_info(self, owner, name, **kwargs):  # noqa: E501
        """Delete tag  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_tag_with_http_info(owner, name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param str name: Component under namespace (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the named args plus kwargs so validation below can treat
        # every parameter uniformly by name.
        local_var_params = locals()
        all_params = [
            'owner',
            'name'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # params dict so the required-parameter checks see everything.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_tag" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                       local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `delete_tag`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_tag`")  # noqa: E501
        collection_formats = {}
        # 'owner' and 'name' fill the {owner}/{name} templates in the URL below.
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/api/v1/orgs/{owner}/tags/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_tag(self, owner, name, **kwargs): # noqa: E501
"""Get tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namesapce (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tag_with_http_info(owner, name, **kwargs) # noqa: E501
def get_tag_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_with_http_info(owner, name, async_req=True)
| |
from big_ol_pile_of_manim_imports import *
from active_projects.quaternions import *
W_COLOR = YELLOW  # color of the real (w) quaternion component
I_COLOR = GREEN   # color of the i component
J_COLOR = RED     # color of the j component
K_COLOR = BLUE    # color of the k component
class QuaternionLabel(VGroup):
    """Mobject rendering a quaternion as "w  +x i  +y j  +z k" with colored decimals."""
    CONFIG = {
        "decimal_config": {}
    }
    def __init__(self, quat, **kwargs):
        VGroup.__init__(self, **kwargs)
        dkwargs = dict(self.decimal_config)
        decimals = VGroup()
        # Real part first, without a forced sign.
        decimals.add(DecimalNumber(quat[0], color=W_COLOR, **dkwargs))
        # The i/j/k coefficients always show an explicit +/- sign.
        dkwargs["include_sign"] = True
        decimals.add(
            DecimalNumber(quat[1], color=I_COLOR, **dkwargs),
            DecimalNumber(quat[2], color=J_COLOR, **dkwargs),
            DecimalNumber(quat[3], color=K_COLOR, **dkwargs),
        )
        self.add(
            decimals[0],
            decimals[1], TexMobject("i"),
            decimals[2], TexMobject("j"),
            decimals[3], TexMobject("k"),
        )
        self.arrange_submobjects(RIGHT, buff=SMALL_BUFF)
        # Kept for in-place updates via set_value.
        self.decimals = decimals
    def set_value(self, quat):
        # Update the four decimals in place; returns self for chaining.
        for decimal, coord in zip(self.decimals, quat):
            decimal.set_value(coord)
        return self
class RandyPrism(Cube):
    """A flat box with a small Randolph pi-creature attached on top."""
    CONFIG = {
        "height": 0.25,
        "width": 1,
        "depth": 1.2,
        "fill_color": BLUE_D,
        "fill_opacity": 0.9,
        "stroke_color": WHITE,
        "stroke_width": 1,
    }
    def __init__(self, **kwargs):
        Cube.__init__(self, **kwargs)
        # Normalize to unit height first so Randy's placement is consistent.
        self.set_height(1)
        randy = Randolph(mode="pondering")
        randy.set_height(0.8)
        randy.rotate(TAU / 4, RIGHT)
        randy.shift(0.7 * DOWN)
        randy.set_shade_in_3d(True, z_index_as_group=True)
        self.randy = randy
        self.add(randy)
        # Stretch the combined (prism + Randy) group to the CONFIG dimensions.
        self.set_height(self.height, stretch=True)
        self.set_width(self.width, stretch=True)
        self.set_depth(self.depth, stretch=True)
        self.center()
class Gimbal(VGroup):
    """Three nested rings forming a gimbal, optionally suspending a mobject.

    alpha/beta/gamma rotate the outer, middle and inner rings respectively;
    each rotation carries along every ring nested inside it.
    """
    CONFIG = {
        "inner_r": 1.2,
        "outer_r": 2.6,
    }
    def __init__(self, alpha=0, beta=0, gamma=0, inner_mob=None, **kwargs):
        VGroup.__init__(self, **kwargs)
        # Seven evenly spaced radii partition [inner_r, outer_r] into
        # alternating ring bands and gaps.
        r1, r2, r3, r4, r5, r6, r7 = np.linspace(
            self.inner_r, self.outer_r, 7
        )
        # Ordered outermost-first so that rings[i:] below rotates ring i
        # together with everything nested inside it.
        rings = VGroup(
            self.get_ring(r5, r6),
            self.get_ring(r3, r4),
            self.get_ring(r1, r2),
        )
        # Red "hinge" lines marking each ring's rotation axis.
        for i, p1, p2 in [(0, r6, r7), (1, r4, r5), (2, r2, r3)]:
            annulus = rings[i]
            lines = VGroup(
                Line(p1 * UP, p2 * UP),
                Line(p1 * DOWN, p2 * DOWN),
            )
            lines.set_stroke(RED)
            annulus.lines = lines
            annulus.add(lines)
        # Middle ring's hinge is perpendicular to the other two.
        rings[1].lines.rotate(90 * DEGREES, about_point=ORIGIN)
        rings.rotate(90 * DEGREES, RIGHT, about_point=ORIGIN)
        rings.set_shade_in_3d(True)
        self.rings = rings
        self.add(rings)
        if inner_mob is not None:
            # Suspend inner_mob from the innermost ring with four thin lines
            # anchored at its left/right x in/out corners.
            corners = [
                inner_mob.get_corner(v1 + v2)
                for v1 in [LEFT, RIGHT]
                for v2 in [IN, OUT]
            ]
            lines = VGroup()
            for corner in corners:
                corner[1] = 0  # zero the y-coordinate of the anchor
                line = Line(
                    corner, self.inner_r * normalize(corner),
                    color=WHITE,
                    stroke_width=1
                )
                lines.add(line)
            lines.set_shade_in_3d(True)
            rings[2].add(lines, inner_mob)
        # Rotations
        # Each angle turns its ring (and all rings nested within it) about
        # the current direction of that ring's first hinge line.
        angles = [alpha, beta, gamma]
        for i, angle in zip(it.count(), angles):
            vect = rings[i].lines[0].get_vector()
            rings[i:].rotate(angle=angle, axis=vect)
    def get_ring(self, in_r, out_r, angle=TAU / 4):
        """Return an annulus spanning [in_r, out_r] built from sectors of *angle*."""
        result = VGroup()
        for start_angle in np.arange(0, TAU, angle):
            start_angle += angle / 2
            sector = AnnularSector(
                inner_radius=in_r,
                outer_radius=out_r,
                angle=angle,
                start_angle=start_angle
            )
            sector.set_fill(LIGHT_GREY, 0.8)
            # Faint boundary arcs on both edges of each sector.
            arcs = VGroup(*[
                Arc(
                    angle=angle,
                    start_angle=start_angle,
                    radius=r
                )
                for r in [in_r, out_r]
            ])
            arcs.set_stroke(BLACK, 1, opacity=0.5)
            sector.add(arcs)
            result.add(sector)
        return result
# Scenes
class Introduction(QuaternionHistory):
    """Opening scene: title equation plus two historical anti-quaternion quotes."""
    CONFIG = {
        "names_and_quotes": [
            (
                "<NAME>",
                """\\Huge ``the quaternion was not only not
                required, but was a positive evil''"""
            ),
            (
                "<NAME>",
                """\\Huge ``Quaternions... though beautifully \\\\ ingenious,
                have been an unmixed evil'' """
            ),
        ]
    }
    def construct(self):
        title_word = TextMobject("Quaternions:")
        title_equation = TexMobject(
            "i^2 = j^2 = k^2 = ijk = -1",
            tex_to_color_map={
                "i": I_COLOR,
                "j": J_COLOR,
                "k": K_COLOR,
            }
        )
        # label = QuaternionLabel([
        #     float(str((TAU * 10**(3 * k)) % 10)[:4])
        #     for k in range(4)
        # ])
        title = VGroup(title_word, title_equation)
        title.arrange_submobjects(RIGHT)
        title.to_edge(UP)
        # images_group unpacks into (images, quotes, names) triples.
        images_group = self.get_dissenter_images_quotes_and_names()
        images_group.to_edge(DOWN)
        images, quotes, names = images_group
        for pair in images_group:
            pair[1].align_to(pair[0], UP)
        self.play(
            FadeInFromDown(title_word),
            Write(title_equation)
        )
        self.wait()
        # Bring in each dissenter with portrait, name, and staggered quote.
        for image, name, quote in zip(images, names, quotes):
            self.play(
                FadeInFrom(image, 3 * DOWN),
                FadeInFromLarge(name),
                LaggedStart(
                    FadeIn, VGroup(*it.chain(*quote)),
                    lag_ratio=0.3,
                    run_time=2
                )
            )
            self.wait(2)
        # Slide everything off screen, portraits left and quotes right.
        self.play(
            title.shift, 2 * UP,
            *[
                ApplyMethod(mob.shift, FRAME_WIDTH * vect / 2)
                for pair in images_group
                for mob, vect in zip(pair, [LEFT, RIGHT])
            ],
        )
class WhoCares(TeacherStudentsScene):
    """Students ask "who cares?"; the teacher answers with an online quote."""
    def construct(self):
        # Screenshots of coder responses, one per platform below.
        quotes = Group(*[
            ImageMobject(
                "CoderQuaternionResponse_{}".format(d),
                height=2
            )
            for d in range(4)
        ])
        logos = Group(*[
            ImageMobject(name, height=0.5)
            for name in [
                "TwitterLogo",
                "HackerNewsLogo",
                "RedditLogo",
                "YouTubeLogo",
            ]
        ])
        # Pin each platform logo to its quote's upper-right corner.
        for quote, logo in zip(quotes, logos):
            logo.move_to(quote.get_corner(UR))
            quote.add(logo)
        quotes.arrange_submobjects_in_grid()
        quotes.set_height(4)
        quotes.to_corner(UL)
        self.student_says(
            "Um...who cares?",
            target_mode="sassy",
            added_anims=[self.teacher.change, "guilty"]
        )
        self.change_student_modes("angry", "sassy", "sad")
        self.wait(2)
        self.play(
            RemovePiCreatureBubble(self.students[1]),
            self.teacher.change, "raise_right_hand"
        )
        # NOTE(review): the commented block below is earlier staging for a
        # Hacker News quote + VR-headset gag; kept as-is for reference.
        # self.play(
        #     LaggedStart(
        #         FadeInFromDown, quotes,
        #         run_time=3
        #     ),
        #     self.get_student_changes(*3 * ["pondering"], look_at_arg=quotes)
        # )
        # self.wait(2)
        # # Show HN
        # hn_quote = quotes[1]
        # hn_context = TextMobject("news.ycombinator.com/item?id=17933908")
        # hn_context.scale(0.7)
        # hn_context.to_corner(UL)
        # vr_headsets = VGroup()
        # for pi in self.students:
        #     vr_headset = SVGMobject("VR_headset")
        #     vr_headset.set_fill(LIGHT_GREY, opacity=0.9)
        #     vr_headset.set_width(pi.eyes.get_width() + 0.3)
        #     vr_headset.move_to(pi.eyes)
        #     vr_headsets.add(vr_headset)
        # self.play(
        #     hn_quote.scale, 2, {"about_edge": DL},
        #     FadeOutAndShift(quotes[0], 5 * UP),
        #     FadeOutAndShift(quotes[2], UR),
        #     FadeOutAndShift(quotes[3], RIGHT),
        #     FadeInFromDown(hn_context),
        # )
        # hn_rect = Rectangle(
        #     height=0.1 * hn_quote.get_height(),
        #     width=0.6 * hn_quote.get_width(),
        #     color=RED
        # )
        # hn_rect.move_to(hn_quote, UL)
        # hn_rect.shift(0.225 * RIGHT + 0.75 * DOWN)
        # self.play(
        #     ShowCreation(hn_rect),
        #     self.get_student_changes(
        #         "erm", "thinking", "confused",
        #         look_at_arg=hn_quote,
        #     )
        # )
        # self.add_foreground_mobjects(vr_headsets)
        # self.play(
        #     LaggedStart(
        #         FadeInFrom, vr_headsets,
        #         lambda m: (m, UP),
        #     ),
        #     self.get_student_changes(
        #         *3 * ["sick"],
        #         look_at_arg=hn_quote,
        #         run_time=3
        #     )
        # )
        # self.wait(3)
        # Show Twitter
        t_quote = quotes[0]
        # t_quote.next_to(FRAME_WIDTH * LEFT / 2 + FRAME_WIDTH * UP / 2, UR)
        # t_quote.set_opacity(0)
        # self.play(
        #     FadeOutAndShift(hn_quote, 4 * LEFT),
        #     FadeOutAndShift(hn_rect, 4 * LEFT),
        #     FadeOutAndShift(hn_context, UP),
        #     FadeOut(vr_headsets),
        #     t_quote.set_opacity, 1,
        #     t_quote.scale, 2,
        #     t_quote.to_corner, UL,
        # )
        # self.remove_foreground_mobjects(vr_headsets)
        # Start fully transparent, then fade the tweet in while enlarging it.
        t_quote.fade(1)
        t_quote.to_corner(UL)
        self.play(
            self.get_student_changes(*3 * ["pondering"], look_at_arg=quotes),
            t_quote.set_opacity, 1,
            t_quote.scale, 2,
            t_quote.to_corner, UL,
        )
        self.wait(2)
        self.change_student_modes(
            "pondering", "happy", "tease",
            look_at_arg=t_quote
        )
        self.wait(2)
        self.play(FadeOut(t_quote))
        self.wait(5)
class ShowSeveralQuaternionRotations(SpecialThreeDScene):
    """Animate a labeled prism through the sequence of quaternions below."""
    CONFIG = {
        # Quaternions [w, x, y, z] applied in order; they need not be unit
        # length, since the tracker normalizes every frame.
        "quaternions": [
            [0, 1, 0, 0],
            [1, 0, 0, 0],
            [1, 0, 1, 0],
            [1, 1, 1, -1],
            [0, -1, 2, 1],
            [1, 0, 0, -1],
            [1, -1, 0, 0],
            [1, -1, 1, 0],
            [1, -1, 1, -1],
            [1, 0, 0, 0],
        ],
        # Initial camera orientation and slow ambient spin.
        "start_phi": 70 * DEGREES,
        "start_theta": -140 * DEGREES,
        "ambient_rotation_rate": 0.01,
    }
    def construct(self):
        # Scene pipeline: the tracker goes first so labels and the prism
        # can read its value from their updaters.
        self.add_q_tracker()
        self.setup_labels()
        self.setup_camera_position()
        self.add_prism()
        self.add_axes()
        self.apply_quaternions()
    def add_q_tracker(self):
        """Track the current quaternion, re-normalized to unit length each frame."""
        self.q_tracker = QuaternionTracker()
        self.q_tracker.add_updater(lambda m: m.normalize())
        self.add(self.q_tracker)
def setup_labels(self):
left_q_label = QuaternionLabel([1, 0, 0, 0])
right_q_label = QuaternionLabel([1, 0, 0, 0])
for label in left_q_label, right_q_label:
lp, rp = TexMobject("()")
lp.next_to(label, LEFT, SMALL_BUFF)
rp.next_to(label, RIGHT, SMALL_BUFF)
label.add(lp, rp)
point_label = TexMobject(
*"(xi+yj+zk)",
tex_to_color_map={
"i": I_COLOR,
"j": J_COLOR,
"k": K_COLOR,
}
)
left_q_label.next_to(point_label, LEFT)
right_q_label.next_to(point_label, RIGHT)
group = VGroup(left_q_label, point_label, right_q_label)
group.arrange_submobjects(RIGHT)
group.set_width(FRAME_WIDTH - 1)
group.to_edge(UP)
self.add_fixed_in_frame_mobjects(BackgroundRectangle(group))
for label, text in zip(group, ["$q$", "Some 3d point", "$q^{-1}$"]):
brace = Brace(label, DOWN)
text_mob = TextMobject(text)
if text_mob.get_width() > brace.get_width():
text_mob.match_width(brace)
text_mob.next_to(brace, DOWN, buff=SMALL_BUFF)
text_mob.add_background_rectangle()
label.add(brace, text_mob)
self.add_fixed_in_frame_mobjects(*group)
left_q_label.add_updater(
lambda m: m.set_value(self.q_tracker.get_value())
)
left_q_label.add_updater(lambda m: self.add_fixed_in_frame_mobjects(m))
right_q_label.add_updater(
lambda m: m.set_value(quaternion_conjugate(
self.q_tracker.get_value()
))
)
right_q_label.add_updater(lambda m: self.add_fixed_in_frame_mobjects(m))
def setup_camera_position(self):
self.set_camera_orientation(
phi=self.start_phi,
theta=self.start_theta,
)
self.begin_ambient_camera_rotation(self.ambient_rotation_rate)
def add_prism(self):
prism = self.prism = self.get_prism()
prism.add_updater(
lambda p: p.become(self.get_prism(
self.q_tracker.get_value()
))
)
self.add(prism)
def add_axes(self):
axes = self.axes = updating_mobject_from_func(self.get_axes)
self.add(axes)
def apply_quaternions(self):
for quat in self.quaternions:
self.change_q(quat)
self.wait(2)
#
def get_unrotated_prism(self):
return RandyPrism().scale(2)
def get_prism(self, quaternion=[1, 0, 0, 0]):
prism = self.get_unrotated_prism()
angle, axis = angle_axis_from_quaternion(quaternion)
prism.rotate(angle=angle, axis=axis, about_point=ORIGIN)
return prism
def get_axes(self):
prism = self.prism
centers = [sm.get_center() for sm in prism[:6]]
axes = VGroup()
for i in range(3):
for u in [-1, 1]:
vect = np.zeros(3)
vect[i] = u
dots = [np.dot(normalize(c), vect) for c in centers]
max_i = np.argmax(dots)
ec = centers[max_i]
prism.get_edge_center(vect)
p1 = np.zeros(3)
p1[i] = ec[i]
p1 *= dots[max_i]
p2 = 10 * vect
axes.add(Line(p1, p2))
axes.set_stroke(LIGHT_GREY, 1)
axes.set_shade_in_3d(True)
return axes
def change_q(self, value, run_time=3, added_anims=None, **kwargs):
if added_anims is None:
added_anims = []
self.play(
self.q_tracker.set_value, value,
*added_anims,
run_time=run_time,
**kwargs
)
class PauseAndPlayOverlay(Scene):
    """Overlay equating pausing the video with interaction... not yet."""
    def construct(self):
        # Fashion a pause symbol out of an "=" sign turned vertical.
        pause_symbol = TexMobject("=")
        pause_symbol.rotate(TAU / 4)
        pause_symbol.stretch(2, 0)
        pause_symbol.scale(1.5)
        arrow = Vector(RIGHT, color=WHITE)
        interact_text = TextMobject("Interact...")
        row = VGroup(pause_symbol, arrow, interact_text)
        row.arrange_submobjects(RIGHT)
        row.scale(2)
        disclaimer = TextMobject("...well, not yet")
        disclaimer.scale(2)
        disclaimer.next_to(row, DOWN, MED_LARGE_BUFF)
        self.play(Write(pause_symbol))
        # Grow the "Interact..." text out of the arrow as the arrow appears.
        self.play(
            GrowFromPoint(
                interact_text, arrow.get_left(),
                rate_func=squish_rate_func(smooth, 0.3, 1)
            ),
            VFadeIn(interact_text),
            GrowArrow(arrow),
        )
        self.wait(2)
        self.play(Write(disclaimer))
        self.wait()
class RotationMatrix(ShowSeveralQuaternionRotations):
CONFIG = {
"start_phi": 60 * DEGREES,
"start_theta": -60 * DEGREES,
}
def construct(self):
self.add_q_tracker()
self.setup_camera_position()
self.add_prism()
self.add_basis_vector_labels()
self.add_axes()
title = TextMobject("Rotation matrix")
title.scale(1.5)
title.to_corner(UL)
self.add_fixed_in_frame_mobjects(title)
angle = 75 * DEGREES
axis = [0.3, 1, 0.3]
matrix = rotation_matrix(angle=angle, axis=axis)
matrix_mob = DecimalMatrix(matrix, h_buff=1.6)
matrix_mob.next_to(title, DOWN)
matrix_mob.to_edge(LEFT)
title.next_to(matrix_mob, UP)
self.add_fixed_in_frame_mobjects(matrix_mob)
colors = [I_COLOR, J_COLOR, K_COLOR]
matrix_mob.set_column_colors(*colors)
columns = matrix_mob.get_columns()
column_rects = VGroup(*[
SurroundingRectangle(c).match_color(c[0])
for c in columns
])
labels = VGroup(*[
TextMobject(
"Where", tex, "goes",
tex_to_color_map={tex: rect.get_color()}
).next_to(rect, DOWN)
for letter, rect in zip(["\\i", "\\j", "k"], column_rects)
for tex in ["$\\hat{\\textbf{%s}}$" % (letter)]
])
labels.space_out_submobjects(0.8)
quaternion = quaternion_from_angle_axis(angle, axis)
self.play(Write(matrix_mob))
self.change_q(quaternion)
self.wait()
last_label = VectorizedPoint(matrix_mob.get_bottom())
last_rect = VMobject()
for label, rect in zip(labels, column_rects):
self.add_fixed_in_frame_mobjects(rect, label)
self.play(
FadeIn(label),
FadeOut(last_label),
ShowCreation(rect),
| |
'94937000',
'94938005',
'94939002',
'94940000',
'94941001',
'94942008',
'94943003',
'94944009',
'94945005',
'94946006',
'94947002',
'94948007',
'94949004',
'94950004',
'94951000',
'94952007',
'94953002',
'94954008',
'94955009',
'94956005',
'94957001',
'94958006',
'94959003',
'94960008',
'94961007',
'94962000',
'94963005',
'94964004',
'94965003',
'94966002',
'94967006',
'94968001',
'94969009',
'94970005',
'94971009',
'94972002',
'94973007',
'94974001',
'94975000',
'94976004',
'94977008',
'94978003',
'94979006',
'94980009',
'94982001',
'94983006',
'94984000',
'94985004',
'94986003',
'94987007',
'94988002',
'94989005',
'94990001',
'94991002',
'94992009',
'94993004',
'94994005',
'94995006',
'94996007',
'94997003',
'94998008',
'94999000',
'95000000',
'95001001',
'95002008',
'95003003',
'95004009',
'95005005',
'95006006',
'95007002',
'95008007',
'95009004',
'95010009',
'95011008',
'95012001',
'95013006',
'95014000',
'95015004',
'95016003',
'95017007',
'95018002',
'95019005',
'95020004',
'95021000',
'95022007',
'95023002',
'95024008',
'95025009',
'95026005',
'95027001',
'95028006',
'95029003',
'95030008',
'95031007',
'95032000',
'95033005',
'95034004',
'95035003',
'95036002',
'95037006',
'95038001',
'95039009',
'95040006',
'95041005',
'95042003',
'95043008',
'95044002',
'95045001',
'95046000',
'95047009',
'95048004',
'95049007',
'95050007',
'95051006',
'95052004',
'95053009',
'95054003',
'95055002',
'95056001',
'95057005',
'95058000',
'95059008',
'95060003',
'95061004',
'95062006',
'95063001',
'95064007',
'95065008',
'95066009',
'95067000',
'95068005',
'95069002',
'95070001',
'95071002',
'95072009',
'95073004',
'95074005',
'95075006',
'95076007',
'95077003',
'95078008',
'95079000',
'95080002',
'95081003',
'95082005',
'95083000',
'95084006',
'95085007',
'95086008',
'95087004',
'95088009',
'95089001',
'95090005',
'95091009',
'95092002',
'95093007',
'95094001',
'95095000',
'95096004',
'95097008',
'95098003',
'95099006',
'95100003',
'95101004',
'95102006',
'95103001',
'95104007',
'95105008',
'95106009',
'95107000',
'95108005',
'95109002',
'95110007',
'95111006',
'95112004',
'95113009',
'95115002',
'95116001',
'95118000',
'95119008',
'95120002',
'95121003',
'95122005',
'95123000',
'95124006',
'95125007',
'95126008',
'95127004',
'95128009',
'95129001',
'95130006',
'95131005',
'95132003',
'95133008',
'95134002',
'95135001',
'95136000',
'95137009',
'95138004',
'95139007',
'95140009',
'95141008',
'95142001',
'95143006',
'95144000',
'95145004',
'95146003',
'95147007',
'95148002',
'95149005',
'95150005',
'95151009',
'95152002',
'95153007',
'95154001',
'95155000',
'95156004',
'95157008',
'95158003',
'95159006',
'95160001',
'95161002',
'95162009',
'95163004',
'95164005',
'95165006',
'95166007',
'95167003',
'95168008',
'95169000',
'95170004',
'95171000',
'95172007',
'95173002',
'95174008',
'95175009',
'95176005',
'95177001',
'95178006',
'95179003',
'95180000',
'95181001',
'95182008',
'95183003',
'95184009',
'95185005',
'95186006',
'95187002',
'95188007',
'95192000',
'95193005',
'95194004',
'95209008',
'95210003',
'95214007',
'95224004',
'95225003',
'95226002',
'95230004',
'95231000',
'95260009',
'95261008',
'95263006',
'95264000'
}
class CardiacPacerInSitu(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of cardiac pacer in situ.
    **Data Element Scope:** This value set may use Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of cardiac pacer in situ.
    **Exclusion Criteria:** No exclusions.
    """
    # Value set metadata (VSAC object identifier, name, expansion version).
    OID = '2.16.840.1.113883.3.526.3.368'
    VALUE_SET_NAME = 'Cardiac Pacer in Situ'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes in this value set.
    ICD10CM = {
        'Z950'
    }
    # SNOMED CT concept identifiers in this value set.
    SNOMEDCT = {
        '441509002'
    }
class CarrierOfPredominantlySexuallyTransmittedInfection(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent exposure to infections that are primarily transmitted through sexual contact.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with potential exposure to infection with a predominantly sexual mode of transmission.
    **Exclusion Criteria:** Excludes codes for specific sexually transmitted infections.
    """
    # Value set metadata (VSAC object identifier, name, expansion version).
    OID = '2.16.840.1.113883.3.464.1003.112.11.1023'
    VALUE_SET_NAME = 'Carrier of Predominantly Sexually Transmitted Infection'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes in this value set.
    ICD10CM = {
        'Z202',
        'Z224'
    }
class CataractCongenital(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of congenital cataract.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of congenital cataract.
    **Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
    """
    # Value set metadata (VSAC object identifier, name, expansion version).
    OID = '2.16.840.1.113883.3.526.3.1412'
    VALUE_SET_NAME = 'Cataract, Congenital'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes in this value set.
    ICD10CM = {
        'Q120'
    }
    # SNOMED CT concept identifiers in this value set.
    SNOMEDCT = {
        '204128001',
        '204138006',
        '204139003',
        '21590003',
        '253223002',
        '253224008',
        '253225009',
        '253226005',
        '253227001',
        '28550007',
        '29590001',
        '66499004',
        '76562003',
        '79410001'
    }
class CataractMatureOrHypermature(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of mature or hypermature cataract.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of mature or hypermature cataract, including senile or intumescent cataract.
    **Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
    """
    # Value set metadata (VSAC object identifier, name, expansion version).
    OID = '2.16.840.1.113883.3.526.3.1413'
    VALUE_SET_NAME = 'Cataract, Mature or Hypermature'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes in this value set.
    ICD10CM = {
        'H269'
    }
    # SNOMED CT concept identifiers in this value set.
    SNOMEDCT = {
        '193590000',
        '217791000119107',
        '264443002',
        '267626000',
        '347461000119107',
        '347521000119103',
        '347581000119104',
        '849000',
        '8656007',
        '95724003'
    }
class CataractPosteriorPolar(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of posterior polar cataract.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of posterior polar cataract, including capsular or subcapsular cataract.
    **Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
    """
    # Value set metadata (VSAC object identifier, name, expansion version).
    OID = '2.16.840.1.113883.3.526.3.1414'
    VALUE_SET_NAME = 'Cataract, Posterior Polar'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes in this value set.
    ICD10CM = {
        'H25041',
        'H25042',
        'H25043'
    }
    # SNOMED CT concept identifiers in this value set.
    SNOMEDCT = {
        '1078791000119109',
        '1078801000119105',
        '15639401000119105',
        '15639441000119107',
        '15639481000119102',
        '15737881000119104',
        '15737921000119106',
        '15737961000119101',
        '253225009',
        '315353005',
        '335831000119107',
        '341441000119102',
        '342821000119103',
        '34533008',
        '346691000119104',
        '347561000119108',
        '5318001'
    }
class CataractSecondaryToOcularDisorders(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of cataract that is a secondary diagnosis to other ocular conditions.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of cataract secondary to degenerative or inflammatory ocular disorders, cataract with neovascularization, or subcapsular glaucomatous flecks.
    **Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
    """
    # Value set metadata (VSAC object identifier, name, expansion version).
    OID = '2.16.840.1.113883.3.526.3.1410'
    VALUE_SET_NAME = 'Cataract Secondary to Ocular Disorders'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes in this value set.
    ICD10CM = {
        'H26211',
        'H26212',
        'H26213',
        'H26221',
        'H26222',
        'H26223'
    }
    # SNOMED CT concept identifiers in this value set.
    SNOMEDCT = {
        '15738161000119104',
        '15738201000119109',
        '15738241000119106',
        '15738281000119101',
        '15738321000119106',
        '193600001',
        '193602009',
        '67733005',
        '76309006'
    }
class CentralCornealUlcer(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of central corneal ulcer.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of central corneal ulcer or keratitis.
    **Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
    """
    # Value set metadata (VSAC object identifier, name, expansion version).
    OID = '2.16.840.1.113883.3.526.3.1428'
    VALUE_SET_NAME = 'Central Corneal Ulcer'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes in this value set.
    ICD10CM = {
        'H16011',
        'H16012',
        'H16013'
    }
    # SNOMED CT concept identifiers in this value set.
    SNOMEDCT = {
        '231901007',
        '332801000119108',
        '338411000119106',
        '344181000119103',
        '397567009',
        '397570008',
        '42513006',
        '7426009',
        '95731004',
        '95732006',
        '95733001'
    }
class CerebrovascularDiseaseStrokeTia(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent stroke and high risk diagnoses associated with stroke, including transient ischemic attack (TIA) and generalized ischemic cerebrovascular disease, which would indicate a patient has atherosclerotic cardiovascular disease (ASCVD) in relation to a cerebrovascular event.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) category or attribute related to Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with ASCVD diagnoses of cerebrovascular origin.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113762.1.4.1047.44'
VALUE_SET_NAME = 'Cerebrovascular Disease, Stroke, TIA'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'G450',
'G451',
'G452',
'G458',
'G459',
'G460',
'G461',
'G462',
'G463',
'G464',
'G465',
'G466',
'G467',
'G468',
'I6300',
'I63011',
'I63012',
'I63013',
'I63019',
'I6302',
'I63031',
'I63032',
'I63033',
'I63039',
'I6309',
'I6310',
'I63111',
'I63112',
'I63113',
'I63119',
'I6312',
'I63131',
'I63132',
'I63133',
'I63139',
'I6319',
'I6320',
'I63211',
'I63212',
'I63213',
'I63219',
'I6322',
'I63231',
'I63232',
'I63233',
'I63239',
'I6329',
'I6330',
'I63311',
'I63312',
'I63313',
'I63319',
'I63321',
'I63322',
'I63323',
'I63329',
'I63331',
'I63332',
'I63333',
'I63339',
'I63341',
'I63342',
'I63343',
'I63349',
'I6339',
'I6340',
'I63411',
'I63412',
'I63413',
'I63419',
'I63421',
'I63422',
'I63423',
'I63429',
'I63431',
'I63432',
'I63433',
'I63439',
'I63441',
'I63442',
'I63443',
'I63449',
'I6349',
'I6350',
'I63511',
'I63512',
'I63513',
'I63519',
'I63521',
'I63522',
'I63523',
'I63529',
'I63531',
'I63532',
'I63533',
'I63539',
'I63541',
'I63542',
'I63543',
'I63549',
'I6359',
'I636',
'I6381',
'I6389',
'I639',
'I6900',
'I69010',
'I69011',
'I69012',
'I69013',
'I69014',
'I69015',
'I69018',
'I69019',
'I69020',
'I69021',
'I69022',
'I69023',
'I69028',
'I69031',
'I69032',
'I69033',
'I69034',
'I69039',
'I69041',
'I69042',
'I69043',
'I69044',
'I69049',
'I69051',
'I69052',
'I69053',
'I69054',
'I69059',
'I69061',
'I69062',
'I69063',
'I69064',
'I69065',
'I69069',
'I69090',
'I69091',
'I69092',
'I69093',
'I69098',
'I6910',
'I69110',
'I69111',
'I69112',
'I69113',
'I69114',
'I69115',
'I69118',
'I69119',
'I69120',
'I69121',
'I69122',
'I69123',
'I69128',
'I69131',
'I69132',
'I69133',
'I69134',
'I69139',
'I69141',
'I69142',
'I69143',
'I69144',
'I69149',
'I69151',
'I69152',
'I69153',
'I69154',
'I69159',
'I69161',
'I69162',
'I69163',
'I69164',
'I69165',
'I69169',
'I69190',
'I69191',
'I69192',
'I69193',
'I69198',
'I6920',
'I69210',
'I69211',
'I69212',
'I69213',
'I69214',
'I69215',
'I69218',
'I69219',
'I69220',
'I69221',
'I69222',
'I69223',
'I69228',
'I69231',
'I69232',
'I69233',
'I69234',
'I69239',
'I69241',
'I69242',
'I69243',
'I69244',
'I69249',
'I69251',
'I69252',
'I69253',
'I69254',
'I69259',
'I69261',
'I69262',
'I69263',
'I69264',
'I69265',
'I69269',
'I69290',
'I69291',
'I69292',
'I69293',
'I69298',
'I6930',
'I69310',
'I69311',
'I69312',
'I69313',
'I69314',
'I69315',
'I69318',
'I69319',
'I69320',
'I69321',
'I69322',
'I69323',
'I69328',
'I69331',
'I69332',
'I69333',
'I69334',
'I69339',
'I69341',
'I69342',
| |
redo.save()
#I'm going to vandalise a book
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM books WHERE username=? AND bookname=?", (username, bookname,))
r = c.fetchall()
#More changes to graph
if r[0][4] == "Y":
#Get owner's phone number
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM users WHERE username=?", (username,))
r = c.fetchall()[0]
l = []
#Read graph
with open("graph.gr", "r") as file:
re = DictReader(file)
for i in re:
if i["source"] != r[3] or i["end"] != pnumber: l.append(dict(i))
l.append({"source": r[3], "end": o})
#Output graph without deleted edge
with open("graph.gr", "w+") as file:
fwriter = writer(file, delimiter=',', quotechar='"')
fwriter.writerow(["source", "end"])
for i in l:
fwriter.writerow([i["source"], i["end"]])
book = Book(username, bookname)
book.load()
#Changing phone number messes things up
if field == "Phone Number":
pnumber = t["Last Name"]
#Yes, this is very hacky, but I didn't want to
#Write an edit function for my book and AVLTree
#Delete old row
row = book.pnumber(pnumber)
book.delete(pnumber)
book.save()
book.load()
#Insert new row
row[field] = t["Phone Number"]
book.insert(row)
book.save()
book.load()
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, success="Successfully changed!", av=av)
elif state == "redo":
book = Book(username, bookname)
book.load()
redo = Changes(username, bookname, "re")
redo.load()
#Nothing to be done
if redo.empty():
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, error="Nothing to redo!", av=av)
t = redo.top()
redo.pop()
redo.save()
#We need to redo an insert operation
if t["Type"] == "INSERT":
t.pop("Type", None)
#Push to undo stack
undo = Changes(username, bookname, "un")
undo.load()
undo.insert("DELETE", t)
undo.save()
#I'm going to touch your graph
if r[0][4] == "Y":
#Find user's phone number
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM users WHERE username=?", (username,))
r = c.fetchall()[0]
#Read graph
l = []
with open("graph.gr", "r") as file:
re = DictReader(file)
for i in re:
if i["source"] != r[3] or i["end"] != t["Phone Number"]: l.append(dict(i))
#Output graph without deleted edge
with open("graph.gr", "w+") as file:
fwriter = writer(file, delimiter=',', quotechar='"')
fwriter.writerow(["source", "end"])
for i in l:
fwriter.writerow([i["source"], i["end"]])
#Edit book
book.delete(t["Phone Number"])
book.save()
book.load()
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, success="Succesfully changed!", av=av)
#We need to redo a delete operation
elif t["Type"] == "DELETE":
t.pop("Type", None)
#Push to undo stack
undo = Changes(username, bookname, "un")
undo.load()
undo.insert("INSERT", t)
undo.save()
#Your graph is not safe
if r[0][4] == "Y":
#Current user's phone number
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM users WHERE username=?", (username,))
r = c.fetchall()[0]
#Append to edge list
with open("graph.gr", "a+") as file:
fwriter = writer(file, delimiter=',', quotechar='"')
fwriter.writerow([r[3], t["Phone Number"]])
#Edit book
book.insert(t)
book.save()
book.load()
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, success="Succesfully changed!", av=av)
elif t["Type"] == "EDIT":
#Assemble row to delete, and row to insert
#If you are wondering why it's first name / last name / phone number / email
#It's because the file was like this for insert and delete changes, so I used the space accordingly
#First Name -> Field Changed
#Last Name -> Old Value
#Phone Number -> New Value
#Email -> Phone Number
t.pop("Type", None)
field = t["First Name"]
o = t["Last Name"]
n = t["Phone Number"]
pnumber = t["Email"]
new = t
new["Phone Number"] = o
new["Last Name"] = n
#Changing phone number changes the key and messes up stuff
if field == "Phone Number":
new["Email"] = new["Phone Number"]
#Push to undo stack
undo = Changes(username, bookname, "un")
undo.load()
undo.insert("EDIT", new)
undo.save()
#Please let me borrow a book
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM books WHERE username=? AND bookname=?", (username, bookname,))
r = c.fetchall()
#Graph to be updated
if r[0][4] == "Y":
#Find owner's phone number
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM users WHERE username=?", (username,))
r = c.fetchall()[0]
l = []
#Read graph
with open("graph.gr", "r") as file:
re = DictReader(file)
for i in re:
if i["source"] != r[3] or i["end"] != pnumber: l.append(dict(i))
l.append({"source": r[3], "end": o})
#Output graph without deleted edge
with open("graph.gr", "w+") as file:
fwriter = writer(file, delimiter=',', quotechar='"')
fwriter.writerow(["source", "end"])
for i in l:
fwriter.writerow([i["source"], i["end"]])
book = Book(username, bookname)
book.load()
#Changing phone number messes things up
if field == "Phone Number":
pnumber = t["Last Name"]
#Yes, this is very hacky, but I didn't want to
#Write an edit function for my book and AVLTree
#Delete old row
row = book.pnumber(pnumber)
book.delete(pnumber)
book.save()
book.load()
#Insert new row
row[field] = t["Phone Number"]
book.insert(row)
book.save()
book.load()
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, success="Successfully changed!", av=av)
return redirect(url_for("books"))
except:
#An edit is to be made
#TODO: Allow edits for multiple fields with dynamic forms
#*vomits blood*
try:
#Get arguments
edit = request.form["edit"]
field = request.form["field"]
pnumber = request.form["pnumber"]
#Convert values into dictionary keys
#I didn't want to do this in HTML, because I wasn't sure if it will mess up or not
convert = {"fname": "First Name", "lname": "Last Name", "birthday": "Birthday", "pnumber": "Phone Number", "email": "Email", "importance": "Importance"}
bookname = request.args.get("bookname")
#Get value form appropriate field
#Different fields have different input types!
if convert[field] in ["First Name", "Last Name", "Phone Number"]:
value = request.form["value1"]
elif convert[field] == "Birthday":
value = int(datetime.datetime.strptime(request.form["value2"], "%Y-%m-%d").timestamp())
elif convert[field] == "Email":
value = request.form["value3"]
elif convert[field] == "Importance":
value = int(request.form["value4"])
#I need to mend contacts in the book
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM books WHERE username=? AND bookname=?", (username, bookname,))
r = c.fetchall()
#Book does not exist
if r == []:
return redirect(url_for("books"))
c.execute("UPDATE books SET time=? WHERE username=? AND bookname=?", (int(time.time()), username, bookname,))
conn.commit()
c.close()
conn.close()
book = Book(username, bookname)
book.load()
#I want to change phone number
t = book.pnumber(pnumber)
if convert[field] == "Phone Number":
c = book.pnumber(value)
#You can't just steal another number like that!
if c is not None:
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, error="Phone Number is invalid!", av=av)
#Change the graph again
elif r[0][4] == "Y":
#Get user's phone number
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM users WHERE username=?", (username,))
r = c.fetchall()[0]
l = []
#Read graph
with open("graph.gr", "r") as file:
re = DictReader(file)
for i in re:
if i["source"] != r[3] or i["end"] != pnumber: l.append(dict(i))
l.append({"source": r[3], "end": value})
#Output graph without deleted edge
with open("graph.gr", "w+") as file:
fwriter = writer(file, delimiter=',', quotechar='"')
fwriter.writerow(["source", "end"])
for i in l:
fwriter.writerow([i["source"], i["end"]])
#No empty attributes please
if value == "" or value == 0:
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, error="No new value!", av=av)
#You can't modify something that doesn't exist
row = book.pnumber(pnumber)
if row is None:
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, error="No new value!", av=av)
#Not a hacky solution
#Explained above
#Delete the row
book.delete(pnumber)
book.save()
book.load()
#Insert back
old = row[convert[field]]
row[convert[field]] = value
book.insert(row)
book.save()
book.load()
#Push to undo stack
undo = Changes(username, bookname, "un")
undo.load()
#I like storing things with random keys to confuse myself
#Empty values which are integers get padded with 0
t = {"First Name": convert[field], "Last Name": old, "Phone Number": value, "Birthday": 0, "Email": pnumber, "Importance": 0}
undo.insert("EDIT", t)
undo.save()
r = book.all()
av = get_average(r)
return render_template("view_book.html", username=username, bookname=bookname, book=r, success="Successfully edited!", av=av)
except:
#The last case is an insert
#Let me fill up the book
bookname = request.args.get("bookname")
conn = sqlite3.connect('storage.db')
c = conn.cursor()
c.execute("SELECT * FROM books WHERE username=? AND bookname=?", (username, bookname,))
r = c.fetchall()
#Book does not exist
if r == []:
return redirect(url_for("books"))
c.execute("UPDATE books SET time=? WHERE username=? AND bookname=?", (int(time.time()), username, bookname,))
conn.commit()
c.close()
conn.close()
book = Book(username, bookname)
book.load()
#Prepare row to insert
d = {}
d["First Name"] = request.form["fname"]
d["Last Name"] = request.form["lname"]
#Convert bithday to seconds since epoch
#Technically a bad storage method for a | |
from datetime import datetime
import pytest
from click.testing import CliRunner
from mock.mock import patch
from ecs_deploy import cli
from ecs_deploy.cli import get_client, record_deployment
from ecs_deploy.ecs import EcsClient
from ecs_deploy.newrelic import Deployment, NewRelicDeploymentException
from tests.test_ecs import EcsTestClient, CLUSTER_NAME, SERVICE_NAME, \
TASK_DEFINITION_ARN_1, TASK_DEFINITION_ARN_2, TASK_DEFINITION_FAMILY_1, \
TASK_DEFINITION_REVISION_2, TASK_DEFINITION_REVISION_1, \
TASK_DEFINITION_REVISION_3
@pytest.fixture
def runner():
    """Provide a Click test runner for invoking CLI commands in-process."""
    cli_runner = CliRunner()
    return cli_runner
@patch.object(EcsClient, '__init__')
def test_get_client(ecs_client):
    """get_client forwards credentials to EcsClient and returns an instance."""
    ecs_client.return_value = None
    credentials = ('access_key_id', 'secret_access_key', 'region', 'profile')
    returned = get_client(*credentials)
    ecs_client.assert_called_once_with(*credentials)
    assert isinstance(returned, EcsClient)
def test_ecs(runner):
    """Invoking the bare CLI prints usage and lists the subcommands."""
    res = runner.invoke(cli.ecs)
    assert res.exit_code == 0
    assert not res.exception
    for snippet in ('Usage: ecs [OPTIONS] COMMAND [ARGS]', ' deploy ', ' scale '):
        assert snippet in res.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_credentials(get_client, runner):
    """Missing AWS credentials yield exit code 1 and a configure hint."""
    get_client.return_value = EcsTestClient()
    res = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME))
    assert res.exit_code == 1
    expected = u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n'
    assert res.output == expected
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_invalid_cluster(get_client, runner):
    """Deploying to an unknown cluster surfaces the AWS error verbatim."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    res = runner.invoke(cli.deploy, ('unknown-cluster', SERVICE_NAME))
    assert res.exit_code == 1
    expected = (
        u'An error occurred (ClusterNotFoundException) when calling the DescribeServices '
        u'operation: Cluster not found.\n\n'
    )
    assert res.output == expected
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_invalid_service(get_client, runner):
    """Deploying an unknown service surfaces the AWS error verbatim."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    res = runner.invoke(cli.deploy, (CLUSTER_NAME, 'unknown-service'))
    assert res.exit_code == 1
    expected = u'An error occurred when calling the DescribeServices operation: Service not found.\n\n'
    assert res.output == expected
@patch('ecs_deploy.cli.get_client')
def test_deploy(get_client, runner):
    """Plain deploy creates revision 2, deregisters 1, and succeeds."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    res = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME))
    assert res.exit_code == 0
    assert not res.exception
    out = res.output
    for snippet in (
        u"Deploying based on task definition: test-task:1",
        u'Successfully created revision: 2',
        u'Successfully deregistered revision: 1',
        u'Successfully changed task definition to: test-task:2',
        u'Deployment successful',
    ):
        assert snippet in out
    # No definition changes requested, so no "Updating" message.
    assert u"Updating task definition" not in out
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_rollback(get_client, runner):
    """A timed-out deploy with --rollback reverts to the prior revision."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key', wait=2)
    res = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--timeout=1', '--rollback'))
    assert res.exit_code == 1
    assert res.exception
    out = res.output
    for snippet in (
        u"Deploying based on task definition: test-task:1",
        u"Deployment failed",
        u"Rolling back to task definition: test-task:1",
        u'Successfully changed task definition to: test-task:1',
        u"Rollback successful",
        u'Deployment failed, but service has been rolled back to previous task definition: test-task:1',
    ):
        assert snippet in out
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_deregister(get_client, runner):
    """--no-deregister deploys revision 2 but keeps revision 1 registered."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    res = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--no-deregister'))
    assert res.exit_code == 0
    assert not res.exception
    out = res.output
    for snippet in (
        u"Deploying based on task definition: test-task:1",
        u'Successfully created revision: 2',
        u'Successfully changed task definition to: test-task:2',
        u'Deployment successful',
    ):
        assert snippet in out
    # The old revision must NOT be deregistered, and nothing was updated.
    assert u'Successfully deregistered revision: 1' not in out
    assert u"Updating task definition" not in out
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_role_arn(get_client, runner):
    """-r updates the task role ARN and reports the old and new values."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    res = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-r', 'arn:new:role'))
    assert res.exit_code == 0
    assert not res.exception
    out = res.output
    for snippet in (
        u"Deploying based on task definition: test-task:1",
        u'Successfully created revision: 2',
        u'Successfully deregistered revision: 1',
        u'Successfully changed task definition to: test-task:2',
        u'Deployment successful',
        u"Updating task definition",
        u'Changed role_arn to: "arn:new:role" (was: "arn:test:role:1")',
    ):
        assert snippet in out
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_execution_role_arn(get_client, runner):
    """Deploying with -x must rewrite the execution role ARN."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-x', 'arn:new:role'))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful',
            u"Updating task definition",
            u'Changed execution_role_arn to: "arn:new:role" (was: "arn:test:role:1")'):
        assert message in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_new_tag(get_client, runner):
    """Deploying with -t must retag the image of every container."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-t', 'latest'))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u"Updating task definition",
            u'Changed image of container "webserver" to: "webserver:latest" (was: "webserver:123")',
            u'Changed image of container "application" to: "application:latest" (was: "application:123")',
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful'):
        assert message in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_image(get_client, runner):
    """Deploying with a single -i must change only that container's image."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy,
        (CLUSTER_NAME, SERVICE_NAME, '-i', 'application', 'application:latest'))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u"Updating task definition",
            u'Changed image of container "application" to: "application:latest" (was: "application:123")',
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful'):
        assert message in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_two_new_images(get_client, runner):
    """Multiple -i options must each change their own container's image."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy,
        (CLUSTER_NAME, SERVICE_NAME,
         '-i', 'application', 'application:latest',
         '-i', 'webserver', 'webserver:latest'))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u"Updating task definition",
            u'Changed image of container "webserver" to: "webserver:latest" (was: "webserver:123")',
            u'Changed image of container "application" to: "application:latest" (was: "application:123")',
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful'):
        assert message in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_command(get_client, runner):
    """Deploying with -c must change the container's command."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-c', 'application', 'foobar'))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u"Updating task definition",
            u'Changed command of container "application" to: "foobar" (was: "run")',
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful'):
        assert message in result.output
@pytest.mark.parametrize(
    'cmd_input, cmd_expected',
    (
        (
            u'curl -f http://localhost/alive/',
            u'curl -f http://localhost/alive/',
        ),
        (
            u'CMD-SHELL curl -f http://localhost/alive/ || 1',
            u'CMD-SHELL curl -f http://localhost/alive/ || 1',
        )
    )
)
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_health_check(get_client, cmd_input, cmd_expected, runner):
    """Deploying with -h must set the container's healthCheck settings.

    The health-check command is passed through unchanged, which is why
    ``cmd_input`` and ``cmd_expected`` are identical in both parametrized
    cases.
    """
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-h', 'application', cmd_input, 30, 5, 3, 0))
    assert result.exit_code == 0
    assert not result.exception
    assert u"Deploying based on task definition: test-task:1" in result.output
    assert u"Updating task definition" in result.output
    # The healthCheck dict is rendered via repr(), whose key order is not
    # guaranteed on older Pythons, so assert on the individual key/value
    # fragments rather than on one full dict literal. (A previously unused
    # `expected_health_check` dict has been removed.)
    assert 'Changed healthCheck of container "application" to: ' in result.output
    assert "'command': " in result.output
    assert cmd_expected in result.output
    assert "'interval': 30" in result.output
    assert "'timeout': 5" in result.output
    assert "'retries': 3" in result.output
    assert "'startPeriod': 0" in result.output
    assert '(was: "None")' in result.output
    assert u'Successfully created revision: 2' in result.output
    assert u'Successfully deregistered revision: 1' in result.output
    assert u'Successfully changed task definition to: test-task:2' in result.output
    assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_environment_variable(get_client, runner):
    """Each -e option must update its container's environment variable."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy,
        (CLUSTER_NAME, SERVICE_NAME,
         '-e', 'application', 'foo', 'bar',
         '-e', 'webserver', 'foo', 'baz'))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u"Updating task definition",
            u'Changed environment "foo" of container "application" to: "bar"',
            u'Changed environment "foo" of container "webserver" to: "baz"',
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful'):
        assert message in result.output
    # untouched variables must not be reported as changed
    assert u'Changed environment "lorem" of container "webserver" to: "ipsum"' not in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_change_environment_variable_empty_string(get_client, runner):
    """An existing environment variable may be set to the empty string."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'application', 'foo', ''))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u"Updating task definition",
            u'Changed environment "foo" of container "application" to: ""',
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful'):
        assert message in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_new_empty_environment_variable(get_client, runner):
    """A brand-new environment variable may be created with an empty value."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(
        cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'application', 'new', ''))
    assert result.exit_code == 0
    assert not result.exception
    for message in (
            u"Deploying based on task definition: test-task:1",
            u"Updating task definition",
            u'Changed environment "new" of container "application" to: ""',
            u'Successfully created revision: 2',
            u'Successfully deregistered revision: 1',
            u'Successfully changed task definition to: test-task:2',
            u'Deployment successful'):
        assert message in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_empty_environment_variable_again(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'webserver', 'empty', ''))
assert | |
else:
props[output_attr] = candidate.value
break
return None
def max_zoom_filter(ctx):
    """
    Drop features whose max_zoom property is below the nominal zoom.

    Features without a max_zoom property are always kept.
    """
    params = _Params(ctx, 'max_zoom_filter')
    layers = params.required('layers', typ=list)
    nominal_zoom = ctx.nominal_zoom

    def _visible(feature):
        # keep when there's no max_zoom, or it hasn't dropped below nominal
        max_zoom = feature[1].get('max_zoom')
        return max_zoom is None or max_zoom >= nominal_zoom

    for layer_name in layers:
        layer = _find_layer(ctx.feature_layers, layer_name)
        layer['features'] = [
            feature for feature in layer['features'] if _visible(feature)]

    return None
def min_zoom_filter(ctx):
    """
    Drop features whose min_zoom is missing or >= nominal zoom + 1.

    In other words, only features which would start to appear by the next
    zoom level (min_zoom < nominal_zoom + 1) are kept.
    """
    params = _Params(ctx, 'min_zoom_filter')
    layers = params.required('layers', typ=list)
    nominal_zoom = ctx.nominal_zoom

    def _visible(feature):
        # features with no min_zoom at all are dropped
        min_zoom = feature[1].get('min_zoom')
        return min_zoom is not None and min_zoom < nominal_zoom + 1

    for layer_name in layers:
        layer = _find_layer(ctx.feature_layers, layer_name)
        layer['features'] = [
            feature for feature in layer['features'] if _visible(feature)]

    return None
def tags_set_ne_pop_min_max_default(ctx):
    """
    The data may potentially be a join result of OSM and NE, so there are
    different scenarios when we populate the population and population_rank
    fields.

    population:
    (1) if the data has population from OSM use it as is
    (2) if the data has no population from OSM, use __ne_pop_min to back-fill
    (3) if the data has no population from OSM and no __ne_pop_min from NE
    (either no OSM<>NE join, or NE just doesn't have a non-nil __ne_pop_min
    value), then use an estimated value to back-fill based on its kind_detail

    population_rank:
    (1) if the data has __ne_pop_max, use it to calculate population_rank
    (2) if the data doesn't have __ne_pop_max (no OSM<>NE join, or NE just
    doesn't have a non-nil __ne_pop_max value) use the population value
    determined by the above procedure to calculate it.
    """
    # name fixed to match the function (was 'tags_set_ne_pop_min_max') so
    # that parameter errors are reported against the right post-processor.
    params = _Params(ctx, 'tags_set_ne_pop_min_max_default')
    layer_name = params.required('layer')
    layer = _find_layer(ctx.feature_layers, layer_name)

    # estimated population for each locality kind_detail, used when neither
    # OSM nor NE provides a value.
    population_estimates = {
        'city': 10000,
        'town': 5000,
        'village': 2000,
        'locality': 1000,
        'hamlet': 200,
        'isolated_dwelling': 100,
        'farm': 50,
    }

    for _, props, _ in layer['features']:
        # the __ne_* join artefacts are consumed here and must not leak into
        # the output properties.
        __ne_pop_min = props.pop('__ne_pop_min', None)
        __ne_pop_max = props.pop('__ne_pop_max', None)
        population = props.get('population')

        if population is None:
            # back-fill from NE's minimum population, if joined.
            population = __ne_pop_min
        if population is None and props.get('kind') == 'locality':
            # fall back to a rough estimate based on the locality's
            # kind_detail; unknown kind_details stay None.
            population = population_estimates.get(props.get('kind_detail'))

        population = to_float(population)
        if population is not None:
            props['population'] = int(population)

        # prefer NE's maximum population for the rank; otherwise fall back
        # to whatever population value was determined above.
        if __ne_pop_max is not None:
            props['population_rank'] = _calculate_population_rank(__ne_pop_max)
        elif population is not None:
            props['population_rank'] = \
                _calculate_population_rank(props['population'])
def tags_set_ne_min_max_zoom(ctx):
    """
    Override the min zoom and max zoom properties with __ne_* variants from
    Natural Earth, if there are any.
    """
    params = _Params(ctx, 'tags_set_ne_min_max_zoom')
    layer_name = params.required('layer')
    layer = _find_layer(ctx.feature_layers, layer_name)
    for _, props, _ in layer['features']:
        # pop the NE join artefact so it never leaks into the output props.
        min_zoom = props.pop('__ne_min_zoom', None)
        if min_zoom is not None:
            # don't overstuff features into tiles when they are in the
            # long tail of won't display, but make their min_zoom
            # consistent with when they actually show in tiles
            if min_zoom % 1 > 0.5:
                # fractional parts above a half get rounded up to the next
                # whole zoom level.
                min_zoom = ceil(min_zoom)
            props['min_zoom'] = min_zoom
        elif props.get('kind') == 'country':
            # countries and regions which don't have a min zoom joined from NE
            # are probably either vandalism or unrecognised countries. either
            # way, we probably don't want to see them at zoom, which is lower
            # than most of the curated NE min zooms. see issue #1826 for more
            # information.
            # NOTE(review): this indexes props['min_zoom'] directly and would
            # raise KeyError if a feature had no min_zoom at all -- it appears
            # to assume OSM always supplies one; confirm upstream.
            props['min_zoom'] = max(6, props['min_zoom'])
        elif props.get('kind') == 'region':
            # same reasoning as for countries, but regions appear later (z8).
            props['min_zoom'] = max(8, props['min_zoom'])
        # NE's max zoom, when joined, unconditionally overrides any existing
        # max_zoom property.
        max_zoom = props.pop('__ne_max_zoom', None)
        if max_zoom is not None:
            props['max_zoom'] = max_zoom
    return None
def whitelist(ctx):
    """
    Restrict a property on all features in a layer to a whitelist of values.

    Whitelisted values are kept as-is, values with an entry in the optional
    ``remap`` dict are replaced, and any other non-None value causes the
    property to be dropped from the feature.
    """
    params = _Params(ctx, 'whitelist')
    layer_name = params.required('layer')
    start_zoom = params.optional('start_zoom', default=0, typ=int)
    end_zoom = params.optional('end_zoom', typ=int)
    property_name = params.required('property')
    whitelist = params.required('whitelist', typ=list)
    remap = params.optional('remap', default={}, typ=dict)
    where = params.optional('where')

    # this post-processor only runs within its configured zoom range.
    if not (start_zoom <= ctx.nominal_zoom and
            (end_zoom is None or ctx.nominal_zoom < end_zoom)):
        return None

    if where is not None:
        where = compile(where, 'queries.yaml', 'eval')

    layer = _find_layer(ctx.feature_layers, layer_name)
    for _, props, _ in layer['features']:
        # honour the optional where clause: features it rejects are skipped.
        if where is not None:
            local = props.copy()
            local['zoom'] = ctx.nominal_zoom
            if not eval(where, {}, local):
                continue

        value = props.get(property_name)
        if value is None:
            continue
        if value in whitelist:
            # whitelisted: leave value as-is
            pass
        elif value in remap:
            # remapped: replace with replacement value
            props[property_name] = remap[value]
        else:
            # neither whitelisted nor remapped: drop the property
            props.pop(property_name)

    return None
def remap(ctx):
    """
    Replace certain values of a property with mapped replacements.

    Similar to whitelist, but values without a mapping keep the property
    untouched instead of having it removed.
    """
    params = _Params(ctx, 'remap')
    layer_name = params.required('layer')
    start_zoom = params.optional('start_zoom', default=0, typ=int)
    end_zoom = params.optional('end_zoom', typ=int)
    property_name = params.required('property')
    remap = params.optional('remap', default={}, typ=dict)
    where = params.optional('where')

    # this post-processor only runs within its configured zoom range.
    if not (start_zoom <= ctx.nominal_zoom and
            (end_zoom is None or ctx.nominal_zoom < end_zoom)):
        return None

    if where is not None:
        where = compile(where, 'queries.yaml', 'eval')

    layer = _find_layer(ctx.feature_layers, layer_name)
    for shape, props, _ in layer['features']:
        # honour the optional where clause, which can also inspect the
        # feature's geometry type.
        if where is not None:
            local = props.copy()
            local['zoom'] = ctx.nominal_zoom
            local['geom_type'] = shape.geom_type
            if not eval(where, {}, local):
                continue

        value = props.get(property_name)
        if value in remap:
            props[property_name] = remap[value]

    return None
def backfill(ctx):
    """
    Backfills default values for some features. In other words, if the
    feature lacks some or all of the defaults, then set those defaults.

    Existing property values are never overwritten.
    """
    # fixed: previously this borrowed the name 'whitelist', which made
    # parameter errors report the wrong post-processor.
    params = _Params(ctx, 'backfill')
    layer_name = params.required('layer')
    start_zoom = params.optional('start_zoom', default=0, typ=int)
    end_zoom = params.optional('end_zoom', typ=int)
    defaults = params.required('defaults', typ=dict)
    where = params.optional('where')

    # check that we're in the zoom range where this post-processor is supposed
    # to operate.
    if ctx.nominal_zoom < start_zoom:
        return None
    if end_zoom is not None and ctx.nominal_zoom >= end_zoom:
        return None

    if where is not None:
        where = compile(where, 'queries.yaml', 'eval')

    layer = _find_layer(ctx.feature_layers, layer_name)
    features = layer['features']
    for feature in features:
        _, props, _ = feature

        # skip this feature if there's a where clause and it evaluates falsey
        # (the previous comment incorrectly said "truthy").
        if where is not None:
            local = props.copy()
            local['zoom'] = ctx.nominal_zoom
            if not eval(where, {}, local):
                continue

        # items() instead of iteritems() so this works on both Python 2 and 3.
        for k, v in defaults.items():
            if k not in props:
                props[k] = v

    return None
def clamp_min_zoom(ctx):
    """
    Raise (clamp) feature min_zoom values according to a lookup table.

    The ``clamp`` dict maps values of ``property`` to the lowest allowed
    min_zoom; features whose min_zoom falls below that floor get it raised.
    """
    params = _Params(ctx, 'clamp_min_zoom')
    layer_name = params.required('layer')
    start_zoom = params.optional('start_zoom', default=0, typ=int)
    end_zoom = params.optional('end_zoom', typ=int)
    clamp = params.required('clamp', typ=dict)
    property_name = params.required('property')

    # this post-processor only runs within its configured zoom range.
    if ctx.nominal_zoom < start_zoom:
        return None
    if end_zoom is not None and ctx.nominal_zoom >= end_zoom:
        return None

    layer = _find_layer(ctx.feature_layers, layer_name)
    for _, props, _ in layer['features']:
        value = props.get(property_name)
        min_zoom = props.get('min_zoom')
        if value is None or min_zoom is None:
            continue
        floor_zoom = clamp.get(value)
        if floor_zoom is not None and floor_zoom > min_zoom:
            props['min_zoom'] = floor_zoom

    return None
def add_vehicle_restrictions(shape, props, fid, zoom):
"""
Parse the maximum height, weight, length, etc... restrictions on vehicles
and create the `hgv_restriction` and `hgv_restriction_shield_text`.
"""
from math import floor
def _one_dp(val, unit):
deci = int(floor(10 * val))
if deci % 10 == 0:
return '%d%s' % (deci / 10, unit)
return '%.1f%s' % (0.1 * deci, unit)
def _metres(val):
# parse metres or feet and inches, return cm
metres = _to_float_meters(val)
if metres:
return True, _one_dp(metres, 'm')
return False, None
def _tonnes(val):
tonnes = to_float(val)
if tonnes:
return True, _one_dp(tonnes, 't')
return False, None
def _false(val):
| |
# of mute_feedback_notifications and mute_suggestion_notifications
# should match the default values.
exploration_user_model = (
user_services.user_models.ExplorationUserDataModel.get(
user_id, exploration_id))
self.assertIsNone(exploration_user_model)
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertEqual(
email_preferences.mute_feedback_notifications,
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
self.assertEqual(
email_preferences.mute_suggestion_notifications,
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
# This initializes a ExplorationUserDataModel instance with
# the default mute values.
user_services.set_email_preferences_for_exploration(
user_id, exploration_id,
mute_feedback_notifications=(
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE),
mute_suggestion_notifications=(
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE))
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertEqual(
email_preferences.mute_feedback_notifications,
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
self.assertEqual(
email_preferences.mute_suggestion_notifications,
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
# This sets only mute_suggestion_notifications property to True.
# mute_feedback_notifications should remain same as before.
user_services.set_email_preferences_for_exploration(
user_id, exploration_id, mute_suggestion_notifications=True)
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertEqual(
email_preferences.mute_feedback_notifications,
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
self.assertTrue(email_preferences.mute_suggestion_notifications)
# This sets only mute_feedback_notifications property to True.
# mute_suggestion_notifications should remain same as before.
user_services.set_email_preferences_for_exploration(
user_id, exploration_id, mute_feedback_notifications=True)
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertTrue(email_preferences.mute_feedback_notifications)
self.assertTrue(email_preferences.mute_suggestion_notifications)
def test_get_usernames_by_role(self):
auth_ids = ['test1', 'test2', 'test3', 'test4']
usernames = ['name1', 'name2', 'name3', 'name4']
user_emails = [
'<EMAIL>', '<EMAIL>',
'<EMAIL>', '<EMAIL>']
user_ids = []
for auth_id, email, name in python_utils.ZIP(
auth_ids, user_emails, usernames):
user_id = user_services.create_new_user(auth_id, email).user_id
user_ids.append(user_id)
user_services.set_username(user_id, name)
user_services.add_user_role(user_ids[0], feconf.ROLE_ID_MODERATOR)
user_services.add_user_role(user_ids[1], feconf.ROLE_ID_MODERATOR)
user_services.add_user_role(user_ids[2], feconf.ROLE_ID_TOPIC_MANAGER)
user_services.add_user_role(user_ids[3], feconf.ROLE_ID_TOPIC_MANAGER)
self.assertEqual(
set(user_services.get_usernames_by_role(feconf.ROLE_ID_MODERATOR)),
set(['name1', 'name2']))
self.assertEqual(
set(user_services.get_usernames_by_role(
feconf.ROLE_ID_TOPIC_MANAGER)),
set(['name3', 'name4']))
def test_get_user_ids_by_role(self):
auth_ids = ['test1', 'test2', 'test3', 'test4']
usernames = ['name1', 'name2', 'name3', 'name4']
user_emails = [
'<EMAIL>', '<EMAIL>',
'<EMAIL>', '<EMAIL>']
user_ids = []
for uid, email, name in python_utils.ZIP(
auth_ids, user_emails, usernames):
user_id = user_services.create_new_user(uid, email).user_id
user_ids.append(user_id)
user_services.set_username(user_id, name)
user_services.add_user_role(user_ids[0], feconf.ROLE_ID_MODERATOR)
user_services.add_user_role(user_ids[1], feconf.ROLE_ID_MODERATOR)
user_services.add_user_role(
user_ids[2], feconf.ROLE_ID_CURRICULUM_ADMIN)
user_services.add_user_role(
user_ids[3], feconf.ROLE_ID_CURRICULUM_ADMIN)
self.assertEqual(
set(user_services.get_user_ids_by_role(feconf.ROLE_ID_MODERATOR)),
set([user_ids[0], user_ids[1]]))
self.assertEqual(
set(user_services.get_user_ids_by_role(
feconf.ROLE_ID_CURRICULUM_ADMIN)),
set([user_ids[2], user_ids[3]]))
def test_update_user_creator_dashboard_display(self):
auth_id = 'test_id'
username = 'testname'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
user_services.set_username(user_id, username)
user_setting = user_services.get_user_settings(user_id)
self.assertEqual(
user_setting.creator_dashboard_display_pref,
constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD'])
user_services.update_user_creator_dashboard_display(
user_id, constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['LIST'])
user_setting = user_services.get_user_settings(user_id)
self.assertEqual(
user_setting.creator_dashboard_display_pref,
constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['LIST'])
def test_add_user_role(self):
auth_id = 'test_id'
username = 'testname'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
user_services.set_username(user_id, username)
self.assertEqual(
user_services.get_user_roles_from_id(user_id),
[feconf.ROLE_ID_FULL_USER])
user_services.add_user_role(
user_id, feconf.ROLE_ID_COLLECTION_EDITOR)
self.assertEqual(
user_services.get_user_roles_from_id(user_id), [
feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_COLLECTION_EDITOR])
def test_adding_other_roles_to_full_user_updates_roles(self):
auth_id = 'test_id'
username = 'testname'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
user_services.set_username(user_id, username)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertEqual(
user_settings_model.roles, [feconf.ROLE_ID_FULL_USER])
self.assertFalse(user_settings_model.banned)
user_services.add_user_role(
user_id, feconf.ROLE_ID_COLLECTION_EDITOR)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertEqual(
user_settings_model.roles, [
feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_COLLECTION_EDITOR])
self.assertFalse(user_settings_model.banned)
user_services.add_user_role(
user_id, feconf.ROLE_ID_TOPIC_MANAGER)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertEqual(
user_settings_model.roles, [
feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_COLLECTION_EDITOR,
feconf.ROLE_ID_TOPIC_MANAGER])
self.assertFalse(user_settings_model.banned)
user_services.add_user_role(
user_id, feconf.ROLE_ID_MODERATOR)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertEqual(
user_settings_model.roles, [
feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_COLLECTION_EDITOR,
feconf.ROLE_ID_TOPIC_MANAGER, feconf.ROLE_ID_MODERATOR])
self.assertFalse(user_settings_model.banned)
user_services.add_user_role(
user_id, feconf.ROLE_ID_CURRICULUM_ADMIN)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertEqual(
user_settings_model.roles, [
feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_COLLECTION_EDITOR,
feconf.ROLE_ID_TOPIC_MANAGER, feconf.ROLE_ID_MODERATOR,
feconf.ROLE_ID_CURRICULUM_ADMIN])
self.assertFalse(user_settings_model.banned)
def test_profile_user_settings_have_correct_roles(self):
auth_id = 'test_id'
username = 'testname'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
user_services.set_username(user_id, username)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
user_settings_model.pin = '12346'
user_settings_model.update_timestamps()
user_settings_model.put()
profile_user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias3',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': None,
}
modifiable_user_data = user_domain.ModifiableUserData.from_raw_dict(
profile_user_data_dict)
profile_user_id = user_services.create_new_profiles(
auth_id, user_email, [modifiable_user_data])[0].user_id
profile_user_settings_model = user_models.UserSettingsModel.get_by_id(
profile_user_id)
self.assertEqual(
profile_user_settings_model.roles, [feconf.ROLE_ID_MOBILE_LEARNER])
self.assertFalse(profile_user_settings_model.banned)
def test_get_all_profiles_auth_details_non_existent_id_raises_error(self):
non_existent_user_id = 'id_x'
error_msg = 'Parent user not found.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.get_all_profiles_auth_details_by_parent_user_id(
non_existent_user_id)
def test_add_user_role_to_mobile_learner_raises_exception(self):
auth_id = 'test_id'
user_email = '<EMAIL>'
user_pin = '12345'
profile_pin = '123'
display_alias = 'display_alias'
display_alias_2 = 'display_alias_2'
user_id = user_services.create_new_user(auth_id, user_email).user_id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
user_services.create_new_profiles(
auth_id, user_email, [self.modifiable_new_user_data])
profile_user_id = (
user_services.get_all_profiles_auth_details_by_parent_user_id(
user_id)[0].user_id
)
self.assertEqual(
user_services.get_user_roles_from_id(profile_user_id),
[feconf.ROLE_ID_MOBILE_LEARNER])
error_msg = 'The role of a Mobile Learner cannot be changed.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.add_user_role(
profile_user_id, feconf.ROLE_ID_FULL_USER)
def test_add_full_user_role_to_learner_raises_exception(self):
auth_id = 'test_id'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
self.assertEqual(
user_services.get_user_roles_from_id(user_id),
[feconf.ROLE_ID_FULL_USER])
error_msg = 'Adding a %s role is not allowed.' % (
feconf.ROLE_ID_MOBILE_LEARNER)
with self.assertRaisesRegexp(Exception, error_msg):
user_services.add_user_role(
user_id, feconf.ROLE_ID_MOBILE_LEARNER)
def test_removing_role_from_mobile_learner_user_raises_exception(self):
auth_id = 'test_id'
user_email = '<EMAIL>'
user_pin = '12345'
profile_pin = '123'
display_alias = 'display_alias'
display_alias_2 = 'display_alias_2'
user_id = user_services.create_new_user(auth_id, user_email).user_id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
user_services.create_new_profiles(
auth_id, user_email, [self.modifiable_new_user_data])
profile_user_id = (
user_services.get_all_profiles_auth_details_by_parent_user_id(
user_id)[0].user_id
)
self.assertEqual(
user_services.get_user_roles_from_id(profile_user_id),
[feconf.ROLE_ID_MOBILE_LEARNER])
error_msg = 'The role of a Mobile Learner cannot be changed.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.remove_user_role(
profile_user_id, feconf.ROLE_ID_TOPIC_MANAGER)
def test_removing_default_user_role_raises_exception(self):
auth_id = 'test_id'
username = 'testname'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
user_services.set_username(user_id, username)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertEqual(
user_settings_model.roles, [feconf.ROLE_ID_FULL_USER])
self.assertFalse(user_settings_model.banned)
error_msg = 'Removing a default role is not allowed.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.remove_user_role(user_id, feconf.ROLE_ID_FULL_USER)
def test_mark_user_banned(self):
auth_id = 'test_id'
username = 'testname'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
user_services.set_username(user_id, username)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertFalse(user_settings_model.banned)
user_services.mark_user_banned(user_id)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertTrue(user_settings_model.banned)
def test_unmark_banned_user(self):
auth_id = 'test_id'
username = 'testname'
user_email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, user_email).user_id
user_services.set_username(user_id, username)
user_services.mark_user_banned(user_id)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertTrue(user_settings_model.banned)
user_services.unmark_user_banned(user_id)
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
self.assertFalse(user_settings_model.banned)
def test_create_new_user_creates_a_new_user_auth_details_entry(self):
new_auth_id = 'new_auth_id'
new_email = '<EMAIL>'
self.assertIsNone(auth_services.get_user_id_from_auth_id(new_auth_id))
user_id = user_services.create_new_user(new_auth_id, new_email).user_id
self.assertIsNotNone(auth_models.UserAuthDetailsModel.get(user_id))
self.assertEqual(
auth_services.get_auth_id_from_user_id(user_id), new_auth_id)
def test_get_auth_details_by_user_id_for_existing_user_works_fine(self):
auth_id = 'new_auth_id'
email = '<EMAIL>'
user_id = user_services.create_new_user(auth_id, email).user_id
user_auth_details_model = auth_models.UserAuthDetailsModel.get(user_id)
user_auth_details = user_services.get_auth_details_by_user_id(user_id)
self.assertEqual(
user_auth_details.user_id, user_auth_details_model.id)
self.assertEqual(
user_auth_details.gae_id, user_auth_details_model.gae_id)
self.assertEqual(
user_auth_details.parent_user_id,
user_auth_details_model.parent_user_id)
def test_get_auth_details_by_user_id_non_existing_user_returns_none(self):
    """Looking up an unknown user id yields None rather than raising."""
    self.assertIsNone(user_services.get_auth_details_by_user_id('id_x'))
def test_get_auth_details_by_user_id_strict_non_existing_user_error(self):
    """With strict=True an unknown user id raises 'User not found'."""
    with self.assertRaisesRegexp(Exception, 'User not found'):
        user_services.get_auth_details_by_user_id('id_x', strict=True)
def test_get_auth_details_by_auth_id_non_existing_user_returns_none(self):
    """Looking up an unknown auth id yields None rather than raising.

    Bug fix: the original body was a verbatim copy of the by-user-id
    test above and called get_auth_details_by_user_id, so the by-auth-id
    lookup named in the test title was never exercised. It now calls the
    auth-id variant with a nonexistent auth id.
    """
    non_existent_auth_id = 'auth_id_x'
    self.assertIsNone(
        user_services.get_auth_details_by_auth_id(non_existent_auth_id))
def test_create_new_profile_with_parent_user_pin_set_is_success(self):
    """A profile can be created once the parent full user has a pin."""
    auth_id = 'auth_id'
    email = '<EMAIL>'
    user_id = user_services.create_new_user(auth_id, email).user_id
    # Give the parent user a pin and display alias first.
    self.modifiable_user_data.user_id = user_id
    self.modifiable_user_data.pin = '12345'
    self.modifiable_user_data.display_alias = 'display_alias'
    user_services.update_multiple_users_data([self.modifiable_user_data])
    # Now create a single profile under that parent.
    self.modifiable_new_user_data.display_alias = 'display_alias2'
    self.modifiable_new_user_data.pin = '123'
    user_services.create_new_profiles(
        auth_id, email, [self.modifiable_new_user_data])
    profiles = (
        user_services.get_all_profiles_auth_details_by_parent_user_id(
            user_id))
    # Exactly one profile exists, parented on the user, with no gae_id.
    self.assertEqual(len(profiles), 1)
    self.assertEqual(profiles[0].parent_user_id, user_id)
    self.assertIsNone(profiles[0].gae_id)
def test_create_new_profile_with_parent_user_pin_not_set_raises_error(self):
    """Creating a profile fails while the parent user has no pin set."""
    auth_id = 'auth_id'
    email = '<EMAIL>'
    user_services.create_new_user(auth_id, email)
    with self.assertRaisesRegexp(
        Exception,
        'Pin must be set for a full user before creating a profile.'
    ):
        self.modifiable_new_user_data.display_alias = 'display_alias'
        self.modifiable_new_user_data.pin = '123'
        user_services.create_new_profiles(
            auth_id, email, [self.modifiable_new_user_data])
def test_create_multiple_new_profiles_for_same_user_works_correctly(self):
    """Two profiles created in one call each get their own auth-details
    and settings models, both parented on the same full user.
    """
    auth_id = 'auth_id'
    email = '<EMAIL>'
    display_alias = 'display_alias'
    display_alias_2 = 'display_alias2'
    display_alias_3 = 'display_alias3'
    user_pin = '12345'
    profile_pin = '123'
    # The parent user needs a pin before profiles may be created.
    user_id = user_services.create_new_user(auth_id, email).user_id
    self.modifiable_user_data.user_id = user_id
    self.modifiable_user_data.pin = user_pin
    self.modifiable_user_data.display_alias = display_alias
    user_services.update_multiple_users_data([self.modifiable_user_data])
    # First profile carries a pin; the second, built from a raw dict,
    # deliberately has pin=None.
    self.modifiable_new_user_data.display_alias = display_alias_2
    self.modifiable_new_user_data.pin = profile_pin
    new_user_data_dict_2 = {
        'schema_version': 1,
        'display_alias': display_alias_3,
        'pin': None,
        'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
        'preferred_site_language_code': None,
        'preferred_audio_language_code': None,
        'user_id': None,
    }
    modifiable_new_user_data_2 = (
        user_domain.ModifiableUserData.from_raw_dict(
            new_user_data_dict_2))
    user_settings_list = user_services.create_new_profiles(
        auth_id, email, [
            self.modifiable_new_user_data, modifiable_new_user_data_2
        ]
    )
    profile_1_id = user_settings_list[0].user_id
    profile_2_id = user_settings_list[1].user_id
    # Both auth-details models point back at the parent user and carry
    # no gae_id of their own.
    user_auth_details_models = [
        {
            'id': model.id,
            'auth_id': model.gae_id,
            'parent_user_id': model.parent_user_id
        } for model in
        auth_services.get_all_profiles_by_parent_user_id(user_id)
    ]
    expected_user_auth_output = [
        {
            'id': profile_1_id,
            'auth_id': None,
            'parent_user_id': user_id
        },
        {
            'id': profile_2_id,
            'auth_id': None,
            'parent_user_id': user_id
        }
    ]
    self.assertItemsEqual(
        user_auth_details_models, expected_user_auth_output)
    # Each settings model carries its own alias/pin plus the mobile
    # learner role.
    user_settings_models = [
        {
            'id': model.id,
            'display_alias': model.display_alias,
            'pin': model.pin,
            'roles': model.roles
        } for model in
        user_models.UserSettingsModel.get_multi(
            [profile_1_id, profile_2_id])
    ]
    expected_user_settings_output = [
        {
            'id': profile_1_id,
            'display_alias': display_alias_2,
            'pin': profile_pin,
            'roles': [feconf.ROLE_ID_MOBILE_LEARNER]
        },
        {
            'id': profile_2_id,
            'display_alias': display_alias_3,
            'pin': None,
            'roles': [feconf.ROLE_ID_MOBILE_LEARNER]
        }
    ]
    self.assertItemsEqual(
        user_settings_models, expected_user_settings_output)
def test_create_new_profile_with_nonexistent_user_raises_error(self):
    """Profile creation under an unknown parent user raises an error."""
    with self.assertRaisesRegexp(Exception, 'User not found.'):
        self.modifiable_new_user_data.display_alias = 'display_alias'
        self.modifiable_new_user_data.pin = '123'
        user_services.create_new_profiles(
            'auth_id_x', '<EMAIL>',
            [self.modifiable_new_user_data]
        )
def test_create_new_profile_modifiable_user_with_user_id_raises_error(self):
    """A profile payload that already carries a user id is rejected."""
    auth_id = 'auth_id'
    email = '<EMAIL>'
    user_id = user_services.create_new_user(auth_id, email).user_id
    # Set the parent user's pin so only the pre-set user id can fail.
    self.modifiable_user_data.user_id = user_id
    self.modifiable_user_data.pin = '12345'
    self.modifiable_user_data.display_alias = 'display_alias'
    user_services.update_multiple_users_data([self.modifiable_user_data])
    with self.assertRaisesRegexp(
        Exception, 'User id cannot already exist for a new user.'
    ):
        self.modifiable_new_user_data.display_alias = 'display_alias2'
        self.modifiable_new_user_data.pin = '123'
        self.modifiable_new_user_data.user_id = 'user_id'
        user_services.create_new_profiles(
            auth_id, email, [self.modifiable_new_user_data]
        )
def test_update_users_modifiable_object_user_id_not_set_raises_error(self):
    """Updating user data without a user id raises 'Missing user ID.'."""
    user_services.create_new_user('auth_id', '<EMAIL>')
    self.modifiable_user_data.user_id = None
    self.modifiable_user_data.pin = '12345'
    self.modifiable_user_data.display_alias = 'display_alias2'
    with self.assertRaisesRegexp(Exception, 'Missing user ID.'):
        user_services.update_multiple_users_data(
            [self.modifiable_user_data])
def test_update_users_for_user_with_non_existent_id_raises_error(self):
    """Updating data for an unknown user id raises 'User not found.'."""
    user_services.create_new_user('auth_id', '<EMAIL>')
    # Point the payload at an id that does not belong to any user.
    self.modifiable_user_data.user_id = 'id_x'
    self.modifiable_user_data.pin = '12345'
    self.modifiable_user_data.display_alias = 'display_alias2'
    with self.assertRaisesRegexp(Exception, 'User not found.'):
        user_services.update_multiple_users_data(
            [self.modifiable_user_data])
def test_update_users_data_for_multiple_users_works_correctly(self):
# Preparing for the test.
auth_id = 'auth_id'
email = '<EMAIL>'
display_alias = 'display_alias'
display_alias_2 = 'display_alias2'
display_alias_3 = 'display_alias3'
user_pin = '12345'
profile_pin = '123'
user_id = user_services.create_new_user(auth_id, email).user_id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
new_user_data_dict_2 = {
'schema_version': 1,
'display_alias': display_alias_3,
'pin': None,
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
| |
)
params.bxy_channel = str(uuid4())
params.bxy_channel = uuid4()
with pytest.raises(TypeError) as excinfo:
params.bxy_channel = 4
assert all(
[s in str(excinfo.value) for s in ["bxy_channel", "type", "int", "str", "UUID"]]
)
def test_bxy_uncertainty():
    """bxy_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.bxy_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.bxy_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("bxy_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_bxz_channel_bool():
    """bxz_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bxz_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.bxz_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("bxz_channel_bool", "type", "str", "bool"))
def test_bxz_channel():
    """bxz_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bxz_channel = str(uuid4())  # uuid given as a string is accepted
    p.bxz_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.bxz_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("bxz_channel", "type", "int", "str", "UUID"))
def test_bxz_uncertainty():
    """bxz_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.bxz_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.bxz_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("bxz_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_byy_channel_bool():
    """byy_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.byy_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.byy_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("byy_channel_bool", "type", "str", "bool"))
def test_byy_channel():
    """byy_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.byy_channel = str(uuid4())  # uuid given as a string is accepted
    p.byy_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.byy_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("byy_channel", "type", "int", "str", "UUID"))
def test_byy_uncertainty():
    """byy_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.byy_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.byy_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("byy_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_byz_channel_bool():
    """byz_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.byz_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.byz_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("byz_channel_bool", "type", "str", "bool"))
def test_byz_channel():
    """byz_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.byz_channel = str(uuid4())  # uuid given as a string is accepted
    p.byz_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.byz_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("byz_channel", "type", "int", "str", "UUID"))
def test_byz_uncertainty():
    """byz_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.byz_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.byz_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("byz_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_bzz_channel_bool():
    """bzz_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bzz_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.bzz_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("bzz_channel_bool", "type", "str", "bool"))
def test_bzz_channel():
    """bzz_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bzz_channel = str(uuid4())  # uuid given as a string is accepted
    p.bzz_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.bzz_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("bzz_channel", "type", "int", "str", "UUID"))
def test_bzz_uncertainty():
    """bzz_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.bzz_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.bzz_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("bzz_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_bx_channel_bool():
    """bx_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bx_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.bx_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("bx_channel_bool", "type", "str", "bool"))
def test_bx_channel():
    """bx_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bx_channel = str(uuid4())  # uuid given as a string is accepted
    p.bx_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.bx_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("bx_channel", "type", "int", "str", "UUID"))
def test_bx_uncertainty():
    """bx_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.bx_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.bx_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("bx_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_by_channel_bool():
    """by_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.by_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.by_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("by_channel_bool", "type", "str", "bool"))
def test_by_channel():
    """by_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.by_channel = str(uuid4())  # uuid given as a string is accepted
    p.by_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.by_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("by_channel", "type", "int", "str", "UUID"))
def test_by_uncertainty():
    """by_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.by_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.by_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("by_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_bz_channel_bool():
    """bz_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bz_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.bz_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("bz_channel_bool", "type", "str", "bool"))
def test_bz_channel():
    """bz_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.bz_channel = str(uuid4())  # uuid given as a string is accepted
    p.bz_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.bz_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("bz_channel", "type", "int", "str", "UUID"))
def test_bz_uncertainty():
    """bz_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.bz_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.bz_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("bz_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_tmi_channel_bool():
    """tmi_channel_bool accepts a bool and rejects a str."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.tmi_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.tmi_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(word in message for word in ("tmi_channel_bool", "type", "str", "bool"))
def test_tmi_channel():
    """tmi_channel accepts str/UUID values and rejects an int."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.tmi_channel = str(uuid4())  # uuid given as a string is accepted
    p.tmi_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.tmi_channel = 4
    message = str(excinfo.value)
    assert all(word in message for word in ("tmi_channel", "type", "int", "str", "UUID"))
def test_tmi_uncertainty():
    """tmi_uncertainty accepts str/UUID/int/float and rejects a Workspace."""
    p = MagneticScalarParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.tmi_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.tmi_uncertainty = geoh5
    message = str(excinfo.value)
    expected = ("tmi_uncertainty", "type", "Workspace", "str", "int", "float", "UUID")
    assert all(word in message for word in expected)
def test_direct_current_inversion_type():
    """inversion_type accepts 'direct current' and rejects other strings."""
    p = DirectCurrentParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.inversion_type = "direct current"
    with pytest.raises(ValueError) as excinfo:
        p.inversion_type = "alskdj"
    message = str(excinfo.value)
    assert all(
        word in message for word in ("inversion_type", "alskdj", "direct current")
    )
def test_direct_current_data_object():
    """data_object accepts a UUID and rejects an int."""
    p = DirectCurrentParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.data_object = uuid4()
    with pytest.raises(TypeError) as excinfo:
        p.data_object = 4
    message = str(excinfo.value)
    assert all(
        word in message
        for word in ("data_object", "type", "int", "UUID", "PotentialElectrode")
    )
def test_potential_channel_bool():
    """potential_channel_bool accepts a bool and rejects a str."""
    p = DirectCurrentParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.potential_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.potential_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(
        word in message for word in ("potential_channel_bool", "type", "str", "bool")
    )
def test_potential_channel():
    """potential_channel accepts str/UUID values and rejects an int."""
    p = DirectCurrentParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.potential_channel = str(uuid4())  # uuid given as a string is accepted
    p.potential_channel = uuid4()  # UUID instance is accepted
    with pytest.raises(TypeError) as excinfo:
        p.potential_channel = 4
    message = str(excinfo.value)
    assert all(
        word in message for word in ("potential_channel", "type", "int", "str", "UUID")
    )
def test_potential_uncertainty():
    """potential_uncertainty accepts str/UUID/int/float, rejects a Workspace."""
    p = DirectCurrentParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    for accepted in (str(uuid4()), uuid4(), 4, 4.0):
        p.potential_uncertainty = accepted
    with pytest.raises(TypeError) as excinfo:
        p.potential_uncertainty = geoh5
    message = str(excinfo.value)
    expected = (
        "potential_uncertainty", "type", "Workspace", "str", "int", "float", "UUID"
    )
    assert all(word in message for word in expected)
def test_induced_polarization_inversion_type():
    """inversion_type accepts 'induced polarization', rejects other strings."""
    p = InducedPolarizationParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.inversion_type = "induced polarization"
    with pytest.raises(ValueError) as excinfo:
        p.inversion_type = "alskdj"
    message = str(excinfo.value)
    assert all(
        word in message
        for word in ("inversion_type", "alskdj", "induced polarization")
    )
def test_induced_polarization_data_object():
    """data_object on InducedPolarizationParams accepts a UUID, rejects int.

    Bug fix: this function was named ``test_direct_current_data_object``,
    duplicating the DirectCurrentParams test defined earlier in the file.
    The second definition shadowed the first (flake8 F811), so only one of
    the two validations ever ran under pytest. Renamed to match the class
    it actually exercises.
    """
    params = InducedPolarizationParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    params.data_object = uuid4()
    with pytest.raises(TypeError) as excinfo:
        params.data_object = 4
    assert all(
        [
            s in str(excinfo.value)
            for s in ["data_object", "type", "int", "UUID", "PotentialElectrode"]
        ]
    )
def test_chargeability_channel_bool():
    """chargeability_channel_bool accepts a bool and rejects a str."""
    p = InducedPolarizationParams(
        validate=True, validator_opts={"ignore_requirements": True}
    )
    p.chargeability_channel_bool = True
    with pytest.raises(TypeError) as excinfo:
        p.chargeability_channel_bool = "alskdj"
    message = str(excinfo.value)
    assert all(
        word in message
        for word in ("chargeability_channel_bool", "type", "str", "bool")
    )
def test_chargeability_channel():
params = InducedPolarizationParams(
validate=True, validator_opts={"ignore_requirements": True}
)
params.chargeability_channel = str(uuid4())
params.chargeability_channel = uuid4()
with | |
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
# Same data also.
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "thai",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_fooddelivery_changelist'), data)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
'with this Driver and Restaurant already exists.</li></ul></td></tr>',
2,
html=True
)
def test_non_form_errors(self):
    """Formset-level (non-form) errors are rendered on save; ticket #12716."""
    data = {
        "form-TOTAL_FORMS": "1",
        "form-INITIAL_FORMS": "1",
        "form-MAX_NUM_FORMS": "0",
        "form-0-id": str(self.per2.pk),
        "form-0-alive": "1",
        "form-0-gender": "2",
        # "_save" makes this a list_editable "Save", not an action "Go".
        "_save": "Save",
    }
    changelist_url = reverse('admin:admin_views_person_changelist')
    response = self.client.post(changelist_url, data)
    self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
    """Formset non-form errors are exposed as an ErrorList; ticket #12878."""
    data = {
        "form-TOTAL_FORMS": "1",
        "form-INITIAL_FORMS": "1",
        "form-MAX_NUM_FORMS": "0",
        "form-0-id": str(self.per2.pk),
        "form-0-alive": "1",
        "form-0-gender": "2",
        "_save": "Save",
    }
    response = self.client.post(
        reverse('admin:admin_views_person_changelist'), data)
    errors = response.context['cl'].formset.non_form_errors()
    self.assertIsInstance(errors, ErrorList)
    self.assertEqual(str(errors), str(ErrorList(["Grace is not a Zombie"])))
def test_list_editable_ordering(self):
    """Saving the list_editable formset applies the submitted 'order'
    values to the matching Category rows.
    """
    collector = Collector.objects.create(id=1, name="<NAME>")
    Category.objects.create(id=1, order=1, collector=collector)
    Category.objects.create(id=2, order=2, collector=collector)
    Category.objects.create(id=3, order=0, collector=collector)
    Category.objects.create(id=4, order=0, collector=collector)
    # NB: The order values must be changed so that the items are reordered.
    data = {
        "form-TOTAL_FORMS": "4",
        "form-INITIAL_FORMS": "4",
        "form-MAX_NUM_FORMS": "0",
        "form-0-order": "14",
        "form-0-id": "1",
        "form-0-collector": "1",
        "form-1-order": "13",
        "form-1-id": "2",
        "form-1-collector": "1",
        "form-2-order": "1",
        "form-2-id": "3",
        "form-2-collector": "1",
        "form-3-order": "0",
        "form-3-id": "4",
        "form-3-collector": "1",
        # The form processing understands this as a list_editable "Save"
        # and not an action "Go".
        "_save": "Save",
    }
    response = self.client.post(reverse('admin:admin_views_category_changelist'), data)
    # Successful post will redirect
    self.assertEqual(response.status_code, 302)
    # The order values have been applied to the right objects
    self.assertEqual(Category.objects.get(id=1).order, 14)
    self.assertEqual(Category.objects.get(id=2).order, 13)
    self.assertEqual(Category.objects.get(id=3).order, 1)
    self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_pagination(self):
    """Pagination works for list_editable items."""
    for pk in (1, 2, 3):
        UnorderedObject.objects.create(id=pk, name='Unordered object #%d' % pk)
    changelist_url = reverse('admin:admin_views_unorderedobject_changelist')
    # Page one lists the two most recent objects only.
    response = self.client.get(changelist_url)
    self.assertContains(response, 'Unordered object #3')
    self.assertContains(response, 'Unordered object #2')
    self.assertNotContains(response, 'Unordered object #1')
    # Page two lists the remaining object.
    response = self.client.get(changelist_url + '?p=2')
    self.assertNotContains(response, 'Unordered object #3')
    self.assertNotContains(response, 'Unordered object #2')
    self.assertContains(response, 'Unordered object #1')
def test_list_editable_action_submit(self):
    # List editable changes should not be executed if the action "Go" button is
    # used to submit the form.
    # NOTE(review): this payload uses hard-coded pks "1"-"3" rather than
    # self.per1/2/3.pk — presumably these match the fixture ids; verify
    # against setUpTestData.
    data = {
        "form-TOTAL_FORMS": "3",
        "form-INITIAL_FORMS": "3",
        "form-MAX_NUM_FORMS": "0",
        "form-0-gender": "1",
        "form-0-id": "1",
        "form-1-gender": "2",
        "form-1-id": "2",
        "form-2-alive": "checked",
        "form-2-gender": "1",
        "form-2-id": "3",
        # "index" plus a chosen action (and no "_save") marks this as an
        # action submission, so the inline edits must be ignored.
        "index": "0",
        "_selected_action": ['3'],
        "action": ['', 'delete_selected'],
    }
    self.client.post(reverse('admin:admin_views_person_changelist'), data)
    # The submitted edits were not applied.
    self.assertIs(Person.objects.get(name="<NAME>").alive, True)
    self.assertEqual(Person.objects.get(name="<NAME>").gender, 1)
def test_list_editable_action_choices(self):
    # List editable changes should be executed if the "Save" button is
    # used to submit the form - any action choices should be ignored.
    data = {
        "form-TOTAL_FORMS": "3",
        "form-INITIAL_FORMS": "3",
        "form-MAX_NUM_FORMS": "0",
        "form-0-gender": "1",
        "form-0-id": str(self.per1.pk),
        "form-1-gender": "2",
        "form-1-id": str(self.per2.pk),
        "form-2-alive": "checked",
        "form-2-gender": "1",
        "form-2-id": str(self.per3.pk),
        # "_save" wins over the action fields below.
        "_save": "Save",
        "_selected_action": ['1'],
        "action": ['', 'delete_selected'],
    }
    self.client.post(reverse('admin:admin_views_person_changelist'), data)
    # The inline edits were applied (and nothing was deleted).
    self.assertIs(Person.objects.get(name="<NAME>").alive, False)
    self.assertEqual(Person.objects.get(name="<NAME>").gender, 2)
def test_list_editable_popup(self):
    """Fields should not be list-editable in popups."""
    changelist_url = reverse('admin:admin_views_person_changelist')
    # Regular changelist: list_editable is in effect.
    response = self.client.get(changelist_url)
    self.assertNotEqual(response.context['cl'].list_editable, ())
    # Popup changelist: list_editable is disabled.
    response = self.client.get(changelist_url + '?%s' % IS_POPUP_VAR)
    self.assertEqual(response.context['cl'].list_editable, ())
def test_pk_hidden_fields(self):
    """
    hidden pk fields aren't displayed in the table body and their
    corresponding human-readable value is displayed instead. The hidden pk
    fields are displayed but separately (not in the table) and only once.
    """
    story1 = Story.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
    story2 = Story.objects.create(
        title='Crouching Tiger, Hidden Python',
        content='The Python was sneaking into...',
    )
    response = self.client.get(reverse('admin:admin_views_story_changelist'))
    # Only one hidden field, in a separate place than the table.
    self.assertContains(response, 'id="id_form-0-id"', 1)
    self.assertContains(response, 'id="id_form-1-id"', 1)
    # Both hidden inputs live together in one "hiddenfields" div; the
    # ordering (story2 before story1) follows the changelist ordering.
    self.assertContains(
        response,
        '<div class="hiddenfields">\n'
        '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
        '<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n</div>'
        % (story2.id, story1.id),
        html=True
    )
    # The table body shows the plain id value exactly once per row.
    self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
    self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
    """ Similarly as test_pk_hidden_fields, but when the hidden pk fields are
    referenced in list_display_links.
    Refs #12475.
    """
    story1 = OtherStory.objects.create(
        title='The adventures of Guido',
        content='Once upon a time in Djangoland...',
    )
    story2 = OtherStory.objects.create(
        title='Crouching Tiger, Hidden Python',
        content='The Python was sneaking into...',
    )
    link1 = reverse('admin:admin_views_otherstory_change', args=(story1.pk,))
    link2 = reverse('admin:admin_views_otherstory_change', args=(story2.pk,))
    response = self.client.get(reverse('admin:admin_views_otherstory_changelist'))
    # Only one hidden field, in a separate place than the table.
    self.assertContains(response, 'id="id_form-0-id"', 1)
    self.assertContains(response, 'id="id_form-1-id"', 1)
    # Hidden inputs are grouped in one div, ordered story2 then story1.
    self.assertContains(
        response,
        '<div class="hiddenfields">\n'
        '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
        '<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n</div>'
        % (story2.id, story1.id),
        html=True
    )
    # Because the pk is in list_display_links, each row renders it as a
    # change-page link exactly once.
    self.assertContains(response, '<th class="field-id"><a href="%s">%d</a></th>' % (link1, story1.id), 1)
    self.assertContains(response, '<th class="field-id"><a href="%s">%d</a></th>' % (link2, story2.id), 1)
@override_settings(ROOT_URLCONF='admin_views.urls')
class AdminSearchTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
cls.joepublicuser = User.objects.create_user(username='joepublic', password='<PASSWORD>')
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.per1 = Person.objects.create(name='<NAME>', gender=1, alive=True)
cls.per2 = Person.objects.create(name='<NAME>', gender=1, alive=False)
cls.per3 = Person.objects.create(name='<NAME>', gender=1, alive=True)
Person.objects.create(name='<NAME>', gender=1)
Person.objects.create(name="<NAME>", gender=1)
cls.t1 = Recommender.objects.create()
cls.t2 = Recommendation.objects.create(the_recommender=cls.t1)
cls.t3 = Recommender.objects.create()
cls.t4 = Recommendation.objects.create(the_recommender=cls.t3)
cls.tt1 = TitleTranslation.objects.create(title=cls.t1, text='Bar')
cls.tt2 = TitleTranslation.objects.create(title=cls.t2, text='Foo')
cls.tt3 = TitleTranslation.objects.create(title=cls.t3, text='Few')
cls.tt4 = TitleTranslation.objects.create(title=cls.t4, text='Bas')
def setUp(self):
self.client.force_login(self.superuser)
def test_search_on_sibling_models(self):
"A search that mentions sibling models"
response = self.client.get(reverse('admin:admin_views_recommendation_changelist') + '?q=bar')
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""
The to_field GET parameter is preserved when a search is performed.
Refs #10918.
"""
response = self.client.get(reverse('admin:auth_user_changelist') + '?q=joe&%s=id' % TO_FIELD_VAR)
self.assertContains(response, "\n1 user\n")
self.assertContains(response, '<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR, html=True)
def test_exact_matches(self):
response = self.client.get(reverse('admin:admin_views_recommendation_changelist') + '?q=bar')
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get(reverse('admin:admin_views_recommendation_changelist') + '?q=ba')
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get(reverse('admin:admin_views_person_changelist') + '?q=Gui')
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get(reverse('admin:admin_views_person_changelist') + '?q=uido')
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get(reverse('admin:admin_views_pluggablesearchperson_changelist') + '?q=Bob')
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get(reverse('admin:admin_views_pluggablesearchperson_changelist') + '?q=20')
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
    """
    Test presence of reset link in search bar ("1 result (_x total_)").
    """
    # 1 query for session + 1 for fetching user
    # + 1 for filtered result + 1 for filtered count
    # + 1 for total count
    search_url = reverse('admin:admin_views_person_changelist') + '?q=Gui'
    with self.assertNumQueries(5):
        response = self.client.get(search_url)
    expected_html = """<span class="small quiet">1 result (<a href="?">5 total</a>)</span>"""
    self.assertContains(response, expected_html, html=True)
def test_no_total_count(self):
    """
    #8408 -- "Show all" should be displayed instead of the total count if
    ModelAdmin.show_full_result_count is False.
    """
    # 1 query for session + 1 for fetching user
    # + 1 for filtered result + 1 for filtered count
    search_url = reverse('admin:admin_views_recommendation_changelist') + '?q=bar'
    with self.assertNumQueries(4):
        response = self.client.get(search_url)
    expected_html = """<span class="small quiet">1 result (<a href="?">Show all</a>)</span>"""
    self.assertContains(response, expected_html, html=True)
    self.assertTrue(response.context['cl'].show_admin_actions)
def test_search_with_spaces(self):
    """Quoted terms are searched as phrases; unquoted words are ANDed
    as independent terms."""
    url = reverse('admin:admin_views_person_changelist') + '?q=%s'
    # Pairs of (search term, expected hit count).
    cases = [
        ('"<NAME>"', 1),
        ("'<NAME>'", 1),
        ('<NAME>', 0),
        ('"<NAME>" John', 1),
        ("'<NAME>' John", 1),
        ("<NAME> John", 0),
        ('"<NAME>"', 1),
        ("'<NAME>'", 1),
        ("'<NAME>\\'Hara'", 1),
    ]
    for term, hit_count in cases:
        with self.subTest(search=term):
            response = self.client.get(url % term)
            self.assertContains(response, '\n%s person' % hit_count)
@override_settings(ROOT_URLCONF='admin_views.urls')
class AdminInheritedInlinesTest(TestCase):
@classmethod
def setUpTestData(cls):
    # Shared fixture: one superuser created once for the whole TestCase.
    cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
def setUp(self):
    # Each test starts with the superuser already logged in.
    self.client.force_login(self.superuser)
def test_inline(self):
"""
Inline models which inherit from a common parent are correctly handled.
"""
foo_user = "foo username"
bar_user = "bar username"
name_re = re.compile(b'name="(.*?)"')
# test the add case
response = self.client.get(reverse('admin:admin_views_persona_add'))
names = name_re.findall(response.content)
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
# test the add case
post_data = {
"name": "Test Name",
# inline data
"accounts-TOTAL_FORMS": "1",
"accounts-INITIAL_FORMS": "0",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": "1",
"accounts-2-INITIAL_FORMS": "0",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": bar_user,
}
response = self.client.post(reverse('admin:admin_views_persona_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
persona_id = Persona.objects.all()[0].id
foo_id = FooAccount.objects.all()[0].id
bar_id = BarAccount.objects.all()[0].id
# test the edit case
response = self.client.get(reverse('admin:admin_views_persona_change', args=(persona_id,)))
names | |
----------
# MAGIC %md
# MAGIC Let's take a look at the DataFrame's schema and some of its rows.
# COMMAND ----------
# Print the DataFrame's schema: its column names and types.
dataDF.printSchema()
# COMMAND ----------
# MAGIC %md
# MAGIC We can register the newly created DataFrame as a named table, using the `registerDataFrameAsTable()` method.
# COMMAND ----------
# Register dataDF under the table name 'dataframe' so it can be queried by name.
sqlContext.registerDataFrameAsTable(dataDF, 'dataframe')
# COMMAND ----------
# MAGIC %md
# MAGIC What methods can we call on this DataFrame?
# COMMAND ----------
# Show the built-in help for the DataFrame object (its methods and attributes).
help(dataDF)
# COMMAND ----------
# MAGIC %md
# MAGIC How many partitions will the DataFrame be split into?
# COMMAND ----------
# Report how many partitions back this DataFrame's underlying RDD.
dataDF.rdd.getNumPartitions()
# COMMAND ----------
# MAGIC %md
# MAGIC ###### A note about DataFrames and queries
# MAGIC
# MAGIC When you use DataFrames or Spark SQL, you are building up a _query plan_. Each transformation you apply to a DataFrame adds some information to the query plan. When you finally call an action, which triggers execution of your Spark job, several things happen:
# MAGIC
# MAGIC 1. Spark's Catalyst optimizer analyzes the query plan (called an _unoptimized logical query plan_) and attempts to optimize it. Optimizations include (but aren't limited to) rearranging and combining `filter()` operations for efficiency, converting `Decimal` operations to more efficient long integer operations, and pushing some operations down into the data source (e.g., a `filter()` operation might be translated to a SQL `WHERE` clause, if the data source is a traditional SQL RDBMS). The result of this optimization phase is an _optimized logical plan_.
# MAGIC 2. Once Catalyst has an optimized logical plan, it then constructs multiple _physical_ plans from it. Specifically, it implements the query in terms of lower level Spark RDD operations.
# MAGIC 3. Catalyst chooses which physical plan to use via _cost optimization_. That is, it determines which physical plan is the most efficient (or least expensive), and uses that one.
# MAGIC 4. Finally, once the physical RDD execution plan is established, Spark actually executes the job.
# MAGIC
# MAGIC You can examine the query plan using the `explain()` function on a DataFrame. By default, `explain()` only shows you the final physical plan; however, if you pass it an argument of `True`, it will show you all phases.
# MAGIC
# MAGIC (If you want to take a deeper dive into how Catalyst optimizes DataFrame queries, this blog post, while a little old, is an excellent overview: [Deep Dive into Spark SQL's Catalyst Optimizer](https://databricks.com/blog/2015/04/13/deep-dive-into-spark-sqls-catalyst-optimizer.html).)
# MAGIC
# MAGIC Let's add a couple transformations to our DataFrame and look at the query plan on the resulting transformed DataFrame. Don't be too concerned if it looks like gibberish. As you gain more experience with Apache Spark, you'll begin to be able to use `explain()` to help you understand more about your DataFrame operations.
# COMMAND ----------
# Chain two transformations, then dump every phase of the resulting query
# plan (explain(True) shows the logical plans as well as the physical one).
newDF = dataDF.distinct().select('*')
newDF.explain(True)
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3c): Subtract one from each value using _select_
# MAGIC
# MAGIC So far, we've created a distributed DataFrame that is split into many partitions, where each partition is stored on a single machine in our cluster. Let's look at what happens when we do a basic operation on the dataset. Many useful data analysis operations can be specified as "do something to each item in the dataset". These data-parallel operations are convenient because each item in the dataset can be processed individually: the operation on one entry doesn't affect the operations on any of the other entries. Therefore, Spark can parallelize the operation.
# MAGIC
# MAGIC One of the most common DataFrame operations is `select()`, and it works more or less like a SQL `SELECT` statement: You can select specific columns from the DataFrame, and you can even use `select()` to create _new_ columns with values that are derived from existing column values. We can use `select()` to create a new column that decrements the value of the existing `age` column.
# MAGIC
# MAGIC `select()` is a _transformation_. It returns a new DataFrame that captures both the previous DataFrame and the operation to add to the query (`select`, in this case). But it does *not* actually execute anything on the cluster. When transforming DataFrames, we are building up a _query plan_. That query plan will be optimized, implemented (in terms of RDDs), and executed by Spark _only_ when we call an action.
# COMMAND ----------
# Transform dataDF through a select transformation and rename the newly created '(age -1)' column to 'age'
# Because select is a transformation and Spark uses lazy evaluation, no jobs, stages,
# or tasks will be launched when we run this code.
# (dataDF.age - 1) builds a derived column; .alias('age') names it back to 'age'.
subDF = dataDF.select('last_name', 'first_name', 'ssn', 'occupation', (dataDF.age - 1).alias('age'))
# COMMAND ----------
# MAGIC %md
# MAGIC Let's take a look at the query plan.
# COMMAND ----------
# Inspect all phases of subDF's query plan.
subDF.explain(True)
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3d) Use _collect_ to view results
# MAGIC
# MAGIC <img src="http://spark-mooc.github.io/web-assets/images/cs105x/diagram-3d.png" style="height:700px;float:right"/>
# MAGIC
# MAGIC To see a list of elements decremented by one, we need to create a new list on the driver from the data distributed in the executor nodes. To do this we can call the `collect()` method on our DataFrame. `collect()` is often used after transformations to ensure that we are only returning a *small* amount of data to the driver. This is done because the data returned to the driver must fit into the driver's available memory. If not, the driver will crash.
# MAGIC
# MAGIC The `collect()` method is the first action operation that we have encountered. Action operations cause Spark to perform the (lazy) transformation operations that are required to compute the values returned by the action. In our example, this means that tasks will now be launched to perform the `createDataFrame`, `select`, and `collect` operations.
# MAGIC
# MAGIC In the diagram, the dataset is broken into four partitions, so four `collect()` tasks are launched. Each task collects the entries in its partition and sends the result to the driver, which creates a list of the values, as shown in the figure below.
# MAGIC
# MAGIC Now let's run `collect()` on `subDF`.
# COMMAND ----------
# Let's collect the data
# collect() is an action: it triggers execution of the pending query plan
# and returns every row to the driver as a local list.
results = subDF.collect()
print results
# COMMAND ----------
# MAGIC %md
# MAGIC A better way to visualize the data is to use the `show()` method. If you don't tell `show()` how many rows to display, it displays 20 rows.
# COMMAND ----------
# Render the first 20 rows (show()'s default row count) as a text table.
subDF.show()
# COMMAND ----------
# MAGIC %md
# MAGIC If you'd prefer that `show()` not truncate the data, you can tell it not to:
# COMMAND ----------
# Show up to 30 rows and keep long cell values untruncated.
subDF.show(n=30, truncate=False)
# COMMAND ----------
# MAGIC %md
# MAGIC In Databricks, there's an even nicer way to look at the values in a DataFrame: The `display()` helper function.
# COMMAND ----------
# Databricks-specific helper that renders the DataFrame as a rich table.
display(subDF)
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3e) Use _count_ to get total
# MAGIC
# MAGIC One of the most basic jobs that we can run is the `count()` job which will count the number of elements in a DataFrame, using the `count()` action. Since `select()` creates a new DataFrame with the same number of elements as the starting DataFrame, we expect that applying `count()` to each DataFrame will return the same result.
# MAGIC
# MAGIC <img src="http://spark-mooc.github.io/web-assets/images/cs105x/diagram-3e.png" style="height:700px;float:right"/>
# MAGIC
# MAGIC Note that because `count()` is an action operation, if we had not already performed an action with `collect()`, then Spark would now perform the transformation operations when we executed `count()`.
# MAGIC
# MAGIC Each task counts the entries in its partition and sends the result to your SparkContext, which adds up all of the counts. The figure on the right shows what would happen if we ran `count()` on a small example dataset with just four partitions.
# COMMAND ----------
# count() is an action; select() preserves the number of rows, so both
# DataFrames report the same count.
print dataDF.count()
print subDF.count()
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3f) Apply transformation _filter_ and view results with _collect_
# MAGIC
# MAGIC Next, we'll create a new DataFrame that only contains the people whose ages are less than 10. To do this, we'll use the `filter()` transformation. (You can also use `where()`, an alias for `filter()`, if you prefer something more SQL-like). The `filter()` method is a transformation operation that creates a new | |
<reponame>leodengyx/LeoMuFundPicker
#!/usr/bin/python
from mutualfund import MutualFund
import logger
import urllib2
import urllib
import json
import types
from bs4 import BeautifulSoup
import re
import os
import sys
import threading
# Rebind the name to this module's logger instance; the imported `logger`
# module itself is not referenced again after this line.
logger = logger.get_logger(__name__)
def thread_func(downloader, thread_id, exporter):
    """Worker body for one of 20 download threads.

    Processes the slice of downloader.mutual_fund_id_list assigned to
    thread_id and writes each successfully downloaded fund via exporter.

    Args:
        downloader: Downloader instance holding the id list, progress
            counter, and lock shared by all workers.
        thread_id: integer in [0, 20) selecting this thread's slice.
        exporter: object exposing write_mutual_fund_to_file(fund, thread_id, index).
    """
    logger.debug("Starting thread %d" % thread_id)
    mutual_fund_id_count = len(downloader.mutual_fund_id_list)
    # Use ceiling division for the chunk size: with the previous floor
    # division, the final (count % 20) ids were assigned to no thread and
    # were therefore never downloaded.
    chunk_size = (mutual_fund_id_count + 19) // 20
    start_pos = chunk_size * thread_id
    # Cap the end of the slice so the last thread stops at the list's end.
    end_pos = min(mutual_fund_id_count, chunk_size * (thread_id + 1))
    for i in range(start_pos, end_pos):
        mutual_fund_inst = downloader.save_mutual_fund_info(downloader.mutual_fund_id_list[i])
        if mutual_fund_inst is None:
            continue
        exporter.write_mutual_fund_to_file(mutual_fund_inst, thread_id, i - start_pos)
        # The progress counter is shared between all workers; guard it with
        # the lock, and guarantee release even if stdout raises.
        downloader.lock.acquire()
        try:
            downloader.current_finish_count += 1
            sys.stdout.write("\rNow processing #%d Mutual Fund." % downloader.current_finish_count)
            sys.stdout.flush()
        finally:
            downloader.lock.release()
class Downloader:
def __init__(self):
    """Set up Morningstar endpoint URLs, paging constants, and the state
    shared between download worker threads."""
    logger.info("__init_() function entry")
    # Endpoints: fund-table listing service, per-fund quicktake page, and
    # the AJAX proxies used to fetch the individual info portions.
    self.init_url = "http://www2.morningstar.ca/Components/FundTable/Services/DataHandler2.ashx?CobrandId=0&Culture=en-CA&SolrQuery=(%3b(%3bCurrency%3aCAD%3bFundCodes%3a**%2c*%2c*%3b%26%3b)%3bSecurityType%3a(FO+FV)%3b%26%3b)&Records=-1&FundView=Morningstar_Analytics"
    self.mutual_fund_url = "http://quote.morningstar.ca/quicktakes/Fund/f_ca.aspx"
    self.mutual_fund_info_url = "http://quote.morningstar.ca/Quicktakes/AjaxProxy.ashx"
    self.mutual_fund_annal_info_url = "http://quote.morningstar.ca/QuickTakes/fund/actionprocess.ashx"
    # File where the collected fund ids are persisted as JSON.
    self.mutual_fund_id_list_file_name = "mutual_fund_id_list.js"
    # Paging: 50 funds requested per listing page.
    self.mutualFundCountPerPage = 50
    self.totalMutualFundCount = 0
    self.mutual_fund_id_list = []
    # Progress counter shared by worker threads, guarded by `lock`.
    self.current_finish_count = 0
    self.lock = threading.Lock()
def __get_mutual_fund_page_count(self):
    """Query the fund table service and return the number of result pages.

    Side effect: stores the reported total fund count in
    self.totalMutualFundCount.
    """
    logger.info("__get_mutual_fund_page_count() function entry")
    # Add post parameters
    query_args = {"page": "1",
                  "rp": str(self.mutualFundCountPerPage),
                  "sortname": "StandardName",
                  "sortorder": "asc",
                  "query": "",
                  "qtype": "StandardName",
                  "myFilter": "",
                  "FundIds": ""}
    encoded_args = urllib.urlencode(query_args)
    request = urllib2.Request(self.init_url, encoded_args)
    # Add headers
    request.add_header("Referer",
                       "http://www2.morningstar.ca/Components/FundTable/FundTable2.aspx?CobrandId=0&Culture=en-CA")
    request.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
    request.add_header("X-Requested-With",
                       "XMLHttpRequest")
    logger.debug("Http request: %s" % request.get_full_url())
    # Get http response and decode the json
    response = urllib2.urlopen(request)
    json_data = response.read()
    decoded_json = json.loads(json_data)
    self.totalMutualFundCount = int(decoded_json[u"total"])
    logger.debug("Total mutual fund count is %d" % self.totalMutualFundCount)
    # Round up: the previous floor division dropped the final partial page,
    # so up to (total % 50) funds were never listed at all.
    return (self.totalMutualFundCount + self.mutualFundCountPerPage - 1) // self.mutualFundCountPerPage
def __save_mutual_fund_id_list_per_page(self, page_number):
    """Fetch one page of the fund table and append every fund id on it to
    self.mutual_fund_id_list.

    Args:
        page_number: 1-based page index passed to the listing service.
    """
    logger.info("__save_mutual_fund_id_list_per_page() function entry. page_number=%d" % page_number)
    # Add post parameters
    query_args = {"page": str(page_number),
                  "rp": str(self.mutualFundCountPerPage),
                  "sortname": "StandardName",
                  "sortorder": "asc",
                  "query": "",
                  "qtype": "StandardName",
                  "myFilter": "",
                  "FundIds": ""}
    encoded_args = urllib.urlencode(query_args)
    request = urllib2.Request(self.init_url, encoded_args)
    # Add headers
    request.add_header("Referer",
                       "http://www2.morningstar.ca/Components/FundTable/FundTable2.aspx?CobrandId=0&Culture=en-CA")
    request.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
    request.add_header("X-Requested-With",
                       "XMLHttpRequest")
    logger.debug("Http request: %s" % request.get_full_url())
    # Get http response and decode the json
    response = urllib2.urlopen(request)
    json_data = response.read()
    decoded_json = json.loads(json_data)
    # isinstance() replaces the deprecated `type(...) == types.ListType`
    # comparison; behavior is unchanged for JSON-decoded lists.
    if isinstance(decoded_json[u"rows"], list):
        for row in decoded_json[u"rows"]:
            mutual_fund_id = row[u"id"]
            self.mutual_fund_id_list.append(mutual_fund_id)
            logger.debug("Save mutual fund id %s" % mutual_fund_id)
def __write_mutual_fund_id_list_to_file(self):
    """Persist the collected fund ids to disk as JSON."""
    # `with` guarantees the file handle is closed even if json.dump raises,
    # unlike the previous bare open()/close() pair.
    with open(self.mutual_fund_id_list_file_name, 'w') as file_hdlr:
        json.dump(self.mutual_fund_id_list, file_hdlr)
def save_mutual_fund_info(self, mutual_fund_id):
    """Download one fund's quicktake page and build a MutualFund from it.

    Returns:
        The populated MutualFund instance, or None when either no embedded
        parameter string is found in the page scripts or the fund name
        cannot be parsed from the header portion.
    """
    logger.info("__save_mutual_fund_info() function entry. {'mutual_fund_id': %s}" % mutual_fund_id)
    # Add GET parameters
    query_args = {"t": mutual_fund_id,
                  "region": "CAN",
                  "culture": "en-CA"}
    request = urllib2.Request(self.mutual_fund_url + "?" + urllib.urlencode(query_args))
    # Add headers
    request.add_header("Referer",
                       "http://www2.morningstar.ca/Components/FundTable/FundTable2.aspx?CobrandId=0&Culture=en-CA")
    request.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
    logger.debug("Http request: %s" % request.get_full_url())
    # Get http response and extract the url of mutual fund
    response = urllib2.urlopen(request)
    soup = BeautifulSoup(response.read(), 'html.parser')
    script_list = soup.find_all("script")
    # BUGFIX: the pattern previously read "...+(U+00AE)ion=CAN..." -- the
    # "&reg" of "&region" had been mangled into the HTML entity for the
    # registered-trademark sign -- so the regex could never match the real
    # "...&region=..." parameter string embedded in the page scripts.
    pattern = r"t=[a-zA-Z0-9]+&region=CAN&culture=en-CA&cur=CAD&productCode=CAN"
    for script in script_list:
        match = re.search(pattern, unicode(script.string))
        if match:
            url_get_parameter_str = script.string[match.start():match.end()]
            # Split url GET parameter string into a {name: value} dict.
            get_parameter_str_list = url_get_parameter_str.split("&")
            get_parameter_dict = {}
            for get_parameter_str in get_parameter_str_list:
                get_parameter_pair = get_parameter_str.split("=")
                get_parameter_dict[get_parameter_pair[0]] = get_parameter_pair[1]
            # Create Mutual Fund Instance
            mutual_fund_inst = MutualFund()
            mutual_fund_inst.fund_id = mutual_fund_id
            # save Mutual Fund Head Portion
            self.__save_mutual_fund_head_portion(mutual_fund_inst, get_parameter_dict)
            if mutual_fund_inst.fund_name == "":
                # Header parsing failed even after retries; skip this fund.
                return None
            # save Mutual Fund Objective and Strategy Portion
            self.__save_mutual_fund_obj_strategy_portion(mutual_fund_inst, get_parameter_dict)
            # save Mutual Fund Performance Portion
            self.__save_mutual_fund_performance_portion(mutual_fund_inst, get_parameter_dict)
            # save Mutual Fund Annual Performance Portion
            #self.__save_mutual_fund_annual_performance_portion(mutual_fund_inst, get_parameter_dict)
            return mutual_fund_inst
    # No script on the page carried the expected parameter string.
    return None
def __save_mutual_fund_head_portion(self, mutual_fund_inst, get_parameter_dict):
    """Fetch the quicktake header portion and populate the summary fields.

    Fills fund_name, fund_size, mer, status, min_inve_initial, category and
    inve_style on mutual_fund_inst.  The page is re-fetched (up to 100
    attempts) until the fund name parses; each other field falls back to a
    default when its tag is missing or malformed.
    """
    logger.info(
        "__save_mutual_fund_head_portion() function entry. {'get_parameter_dict': %s}" % get_parameter_dict)
    is_loop = True
    retry_count = 0
    while is_loop and retry_count < 100:
        # Get mutual fund header portion
        query_args = {"url": "http://quotes.morningstar.com/fund/c-header?",
                      "t": get_parameter_dict["t"],
                      "region": get_parameter_dict["region"],
                      "culture": get_parameter_dict["culture"],
                      "cur": get_parameter_dict["cur"],
                      "productCode": get_parameter_dict["productCode"],
                      "showtitle": "1"}
        request = urllib2.Request(self.mutual_fund_info_url + "?" + urllib.urlencode(query_args))
        request.add_header("User-Agent",
                           "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
        request.add_header("X-Requested-With",
                           "XMLHttpRequest")
        logger.debug("Http request: %s" % request.get_full_url())
        response = urllib2.urlopen(request)
        mutual_fund_info_head_soup = BeautifulSoup(response.read(), "html.parser")
        # Save fund name.  A failure here means the page itself is bad, so
        # re-fetch.  `except Exception` (instead of the previous bare
        # `except:`) lets KeyboardInterrupt/SystemExit break the retry loop.
        try:
            fund_name_tag = mutual_fund_info_head_soup.find("h1")
            fund_name_tag_str = unicode(fund_name_tag.string).lstrip().rstrip()
            mutual_fund_inst.fund_name = fund_name_tag_str
            logger.debug("Save fund name %s" % fund_name_tag_str)
            is_loop = False
        except Exception:
            is_loop = True
            retry_count += 1
            logger.error("Error parsing fund name. We got no choice but reparse.")
            continue
        # Save fund size
        try:
            total_assets_tag = mutual_fund_info_head_soup.find("span", vkey="TotalAssets")
            total_assets_tag_str = unicode(total_assets_tag.string).lstrip().rstrip()
            # Keep only the numeric part before the first space.
            mutual_fund_inst.fund_size = float(total_assets_tag_str[0:total_assets_tag_str.find(' ')])
            logger.debug("Save fund size: %f million" % mutual_fund_inst.fund_size)
        except Exception:
            mutual_fund_inst.fund_size = 0
            logger.error("Error reading fund size of fund %s" % mutual_fund_inst.fund_name)
        # Save MER
        try:
            mer_tag = mutual_fund_info_head_soup.find("span", vkey="ExpenseRatio")
            mer_tag_str = unicode(mer_tag.string).lstrip().rstrip()
            # Keep only the numeric part before the '%' sign.
            mutual_fund_inst.mer = float(mer_tag_str[0:mer_tag_str.find('%')])
            logger.debug("Save fund MER: %f" % mutual_fund_inst.mer)
        except Exception:
            mutual_fund_inst.mer = 0
            logger.error("Error reading MER of fund %s" % mutual_fund_inst.fund_name)
        # Save Status
        try:
            status_tag = mutual_fund_info_head_soup.find("span", vkey="Status")
            status_tag_str = unicode(status_tag.string).lstrip().rstrip()
            mutual_fund_inst.status = status_tag_str
            logger.debug("Save fund status: %s" % mutual_fund_inst.status)
        except Exception:
            mutual_fund_inst.status = "open"
            logger.error("Error reading Status of fund %s" % mutual_fund_inst.fund_name)
        # Save Min-Investment
        try:
            min_investment_tag = mutual_fund_info_head_soup.find("span", vkey="MinInvestment")
            min_investment_tag_str = unicode(min_investment_tag.string).lstrip().rstrip()
            # Drop thousands separators before parsing, e.g. "1,000" -> 1000.
            mutual_fund_inst.min_inve_initial = int(min_investment_tag_str.replace(',', ''))
            logger.debug("Save fun minimum investment: %d" % mutual_fund_inst.min_inve_initial)
        except Exception:
            mutual_fund_inst.min_inve_initial = 0
            logger.error("Error reading Min Invest of fund %s" % mutual_fund_inst.fund_name)
        # Save Category
        try:
            category_tag = mutual_fund_info_head_soup.find("span", vkey="MorningstarCategory")
            category_tag_str = unicode(category_tag.string).lstrip().rstrip()
            mutual_fund_inst.category = category_tag_str
            logger.debug("Save fund category: %s" % mutual_fund_inst.category)
        except Exception:
            mutual_fund_inst.category = ""
            logger.error("Error reading Category of fund %s" % mutual_fund_inst.fund_name)
        # Save Invest-Style
        try:
            invest_style_tag = mutual_fund_info_head_soup.find("span", vkey="InvestmentStyle")
            # The style cell mixes whitespace-only strings with the value;
            # take the first non-empty string.
            for string in invest_style_tag.strings:
                if len(string.lstrip().rstrip()) != 0:
                    mutual_fund_inst.inve_style = string.lstrip().rstrip()
                    logger.debug("Save fund invest style: %s" % mutual_fund_inst.inve_style)
                    break
        except Exception:
            mutual_fund_inst.inve_style = ""
            logger.error("Error reading Invest Style of fund %s" % mutual_fund_inst.fund_name)
def __save_mutual_fund_obj_strategy_portion(self, mutual_fund_inst, get_parameter_dict):
    """Fetch the 'investment objective and strategy' portion and store its
    text on mutual_fund_inst (empty string on any parse failure)."""
    logger.info(
        "__save_mutual_fund_obj_strategy_portion() function entry. {'get_parameter_dict': %s}" % get_parameter_dict)
    # Get mutual fund objective and strategy portion
    query_args = {"url": "http://financials.morningstar.com/fund/investObjAndStrategy.html?",
                  "t": get_parameter_dict["t"],
                  "region": get_parameter_dict["region"],
                  "culture": get_parameter_dict["culture"],
                  "cur": get_parameter_dict["cur"],
                  "productCode": get_parameter_dict["productCode"]}
    request = urllib2.Request(self.mutual_fund_info_url + "?" + urllib.urlencode(query_args))
    request.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
    logger.debug("Http request: %s" % request.get_full_url())
    response = urllib2.urlopen(request)
    mutual_fund_info_obj_strategy_soup = BeautifulSoup(response.read(), "html.parser")
    # Save Objective and Strategy.  The text sits in the second <div> of the
    # fragment.  `except Exception` (instead of the previous bare `except:`)
    # keeps KeyboardInterrupt/SystemExit propagating.
    try:
        div_tag_list = mutual_fund_info_obj_strategy_soup.find_all("div")
        mutual_fund_inst.inve_objective_strategy = unicode(div_tag_list[1].string).lstrip().rstrip()
        logger.debug("Save fund objective and strategy: %s" % mutual_fund_inst.inve_objective_strategy)
    except Exception:
        mutual_fund_inst.inve_objective_strategy = ""
        logger.error("Error reading Invest Objective Strategy of fund %s" % mutual_fund_inst.fund_name)
def __save_mutual_fund_performance_portion(self, mutual_fund_inst, get_parameter_dict):
logger.info(
"__save_mutual_fund_performance_portion() function entry. {'get_parameter_dict': %s}" % get_parameter_dict)
# Get mutual fund performance portion
query_args = {"url": "http://quotes.morningstar.com/fund/c-performance?",
"t": get_parameter_dict["t"],
"region": get_parameter_dict["region"],
"culture": get_parameter_dict["culture"],
"cur": get_parameter_dict["cur"],
"productCode": get_parameter_dict["productCode"]}
request = urllib2.Request(self.mutual_fund_info_url + "?" + urllib.urlencode(query_args))
request.add_header("User-Agent",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
request.add_header("X-Requested-With",
"XMLHttpRequest")
logger.debug("Http request: %s" % request.get_full_url())
response = urllib2.urlopen(request)
mutual_fund_info_performance_soup = BeautifulSoup(response.read(), "html.parser")
# Save Performance
tr_tag_list = mutual_fund_info_performance_soup.find_all("tr")
# Save growth_of_ten_thousand_YTD
try:
if unicode(tr_tag_list[2].contents[3].string) != u"\u2014":
growth_of_ten_thousand_YTD = float(unicode(tr_tag_list[2].contents[3].string).replace(",", ""))
mutual_fund_inst.growth_of_ten_thousand_YTD = growth_of_ten_thousand_YTD
logger.debug("Save growth_of_ten_thousand_YTD %f" % mutual_fund_inst.growth_of_ten_thousand_YTD)
except:
mutual_fund_inst.growth_of_ten_thousand_YTD = 0
logger.error("Error reading growth_of_ten_thousand_YTD of fund %s" % mutual_fund_inst.fund_name)
# Save growth_of_ten_thousand_1month
try:
if unicode(tr_tag_list[2].contents[5].string) != u"\u2014":
growth_of_ten_thousand_1month = float(unicode(tr_tag_list[2].contents[5].string).replace(",", ""))
mutual_fund_inst.growth_of_ten_thousand_1month = growth_of_ten_thousand_1month
logger.debug("Save growth_of_ten_thousand_1month %f" % mutual_fund_inst.growth_of_ten_thousand_1month)
except:
mutual_fund_inst.growth_of_ten_thousand_1month = 0
logger.error("Error reading growth_of_ten_thousand_1month of fund %s" % mutual_fund_inst.fund_name)
# Save growth_of_ten_thousand_1year
try:
if unicode(tr_tag_list[2].contents[7].string) != u"\u2014":
growth_of_ten_thousand_1year = float(unicode(tr_tag_list[2].contents[7].string).replace(",", ""))
mutual_fund_inst.growth_of_ten_thousand_1year = growth_of_ten_thousand_1year
logger.debug("Save growth_of_ten_thousand_1year %f" % mutual_fund_inst.growth_of_ten_thousand_1year)
except:
mutual_fund_inst.growth_of_ten_thousand_1year = 0
logger.error("Error reading growth_of_ten_thousand_1year of fund %s" % mutual_fund_inst.fund_name)
# Save growth_of_ten_thousand_3year
try:
if unicode(tr_tag_list[2].contents[11].string) != u"\u2014":
growth_of_ten_thousand_3year = float(unicode(tr_tag_list[2].contents[11].string).replace(",", ""))
mutual_fund_inst.growth_of_ten_thousand_3year = growth_of_ten_thousand_3year
logger.debug("Save growth_of_ten_thousand_3year %f" % mutual_fund_inst.growth_of_ten_thousand_3year)
except:
mutual_fund_inst.growth_of_ten_thousand_3year = 0
logger.error("Error reading growth_of_ten_thousand_3year of fund %s" % mutual_fund_inst.fund_name)
# Save growth_of_ten_thousand_5year
try:
if unicode(tr_tag_list[2].contents[15].string) != u"\u2014":
growth_of_ten_thousand_5year = float(unicode(tr_tag_list[2].contents[15].string).replace(",", ""))
mutual_fund_inst.growth_of_ten_thousand_5year = growth_of_ten_thousand_5year
logger.debug("Save growth_of_ten_thousand_5year %f" % mutual_fund_inst.growth_of_ten_thousand_5year)
except:
mutual_fund_inst.growth_of_ten_thousand_5year = 0
logger.error("Error reading growth_of_ten_thousand_5year of fund %s" % mutual_fund_inst.fund_name)
# Save growth_of_ten_thousand_10year
try:
if unicode(tr_tag_list[2].contents[19].string) != u"\u2014":
growth_of_ten_thousand_10year = float(unicode(tr_tag_list[2].contents[19].string).replace(",", ""))
mutual_fund_inst.growth_of_ten_thousand_10year = growth_of_ten_thousand_10year
logger.debug("Save growth_of_ten_thousand_10year %f" % mutual_fund_inst.growth_of_ten_thousand_10year)
except:
mutual_fund_inst.growth_of_ten_thousand_10year = 0
logger.error("Error reading growth_of_ten_thousand_10year of fund %s" % mutual_fund_inst.fund_name)
# Save growth_fund_YTD
try:
if unicode(tr_tag_list[3].contents[3].string) != u"\u2014":
growth_fund_YTD = float(unicode(tr_tag_list[3].contents[3].string).replace(",", ""))
mutual_fund_inst.growth_fund_YTD = growth_fund_YTD
logger.debug("Save growth_fund_YTD %f" % mutual_fund_inst.growth_fund_YTD)
except:
mutual_fund_inst.growth_fund_YTD = 0
logger.error("Error reading growth_fund_YTD of fund %s" % mutual_fund_inst.fund_name)
# Save growth_fund_1month
try:
if unicode(tr_tag_list[3].contents[5].string) != u"\u2014":
growth_fund_1month = float(unicode(tr_tag_list[3].contents[5].string).replace(",", ""))
mutual_fund_inst.growth_fund_1month = growth_fund_1month
logger.debug("Save growth_fund_1month %f" % mutual_fund_inst.growth_fund_1month)
except:
mutual_fund_inst.growth_fund_1month = 0
logger.error("Error reading growth_fund_1month of fund %s" % mutual_fund_inst.fund_name)
# Save growth_fund_1year
try:
if unicode(tr_tag_list[3].contents[7].string) != u"\u2014":
growth_fund_1year = float(unicode(tr_tag_list[3].contents[7].string).replace(",", ""))
mutual_fund_inst.growth_fund_1year = growth_fund_1year
logger.debug("Save growth_fund_1year %f" % mutual_fund_inst.growth_fund_1year)
except:
mutual_fund_inst.growth_fund_1year = 0
logger.error("Error reading growth_fund_1year of fund %s" % mutual_fund_inst.fund_name)
# Save growth_fund_3year
try:
if unicode(tr_tag_list[3].contents[11].string) != u"\u2014":
growth_fund_3year = float(unicode(tr_tag_list[3].contents[11].string).replace(",", ""))
mutual_fund_inst.growth_fund_3year = growth_fund_3year
logger.debug("Save growth_fund_3year %f" % mutual_fund_inst.growth_fund_3year)
except:
mutual_fund_inst.growth_fund_3year = 0
logger.error("Error reading growth_fund_3year of fund %s" % mutual_fund_inst.fund_name)
# Save | |
<reponame>xPrithvi/Newsfeed<gh_stars>1-10
# Importing the Python modules, the dependencies of the application.
import random
import os
import sys
import json
import time
import urllib.request
import subprocess
from bs4 import BeautifulSoup
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
# Main algorithm.
def run():
    """Entry point: build the Qt application, construct the NewsFeed main
    window, and block until the event loop exits."""
    app = QApplication(sys.argv)
    # Keep a reference to the window so it is not garbage-collected while
    # the event loop runs.
    main_window = MainWindow()
    sys.exit(app.exec_())
# Class for the NewsFeed GUI.
class MainWindow(QMainWindow):
"""The class for the NewsFeed GUI."""
# Class variables are created, they are used by all methods of the MainWindow class, the SearchArticlesThread class and the SaveArticlesThread class.
"""The API Key is needed to make URL requests to https://newsapi.org/"""
APIKEY = None
"""This list contains all news sources that news articles will be collected from."""
news_sources = []
"""This variable decides if whether the top or the latest news articles from a given news sources will be collected."""
sort_by_var = "top"
"""These lists contain the ID's of individual news articles saved on the computer, these ID's will be used by
the program to distinguish between which news articles to save for offline use."""
article_list = []
selected_articles = []
"""This variable contains the value of the progressbar."""
progress_bar_value = 0
"""This variable contains the working directory of the application. The working directory of the application is
identical to the directory where NewsAPI.exe is located."""
working_directory = (os.getcwd())
executable_directory = (working_directory + "\\" + "NewsFeed.exe")
    # The initiation method runs once an instance of the MainWindow class is created.
    def __init__(self, parent = None):
        """Construct the NewsFeed main window.

        Creates the two worker threads (search/save) and wires their signals
        to GUI handlers, builds the toolbar widgets, the central read-only
        HTML textbox and the status bar, shows the window, and finally loads
        the configuration file. Widgets and threads are stored as *class*
        attributes so the handler functions (defined without ``self``) and
        the worker threads can reach them through the MainWindow class.
        ``parent`` is accepted for Qt convention but is not used here.
        """
        # The window title and geometry of the NewsFeed GUI are set.
        super(MainWindow, self).__init__()
        self.setWindowTitle("NewsFeed 1.0.0 (64-bit)")
        self.setGeometry(150, 150, 800, 500)
        """The Main Thread (GUI Thread) of the NewsFeed application displays the GUI. The Main Thread
        is connected by signals to the SearchArticlesThread and the SaveArticlesThread. These signals
        allow for the threads to communicate to the Main Thread."""
        # Instance of the SearchArticlesThread is created.
        MainWindow.SearchArticlesThread = SearchArticlesThread()
        # Signals between the Main Thread and the Search Articles Thread are created.
        # NOTE: the connected handlers are plain functions on the class (no
        # ``self`` parameter), so the signals call them without an instance.
        self.SearchArticlesThread.display_articles_signal.connect(MainWindow.display_articles)
        self.SearchArticlesThread.clear_textbox_signal.connect(MainWindow.clear_textbox)
        self.SearchArticlesThread.update_statusbar_signal.connect(MainWindow.update_statusbar_articles_thread)
        self.SearchArticlesThread.update_progressbar_signal.connect(MainWindow.update_progressbar)
        self.SearchArticlesThread.terminate_signal.connect(MainWindow.terminate_event_handler)
        self.SearchArticlesThread.no_connection_signal.connect(MainWindow.no_internet_connection)
        # Instance of the SaveArticlesThread is created.
        MainWindow.SaveArticlesThread = SaveArticlesThread()
        # Signals between the Main Thread and the Save Articles Thread are created.
        self.SaveArticlesThread.update_statusbar_signal.connect(MainWindow.update_statusbar_save_articles_thread)
        self.SaveArticlesThread.update_progressbar_signal.connect(MainWindow.update_progressbar)
        self.SaveArticlesThread.terminate_signal.connect(MainWindow.terminate_event_handler)
        self.SaveArticlesThread.no_connection_signal.connect(MainWindow.no_internet_connection)
        # Toolbar, Buttons and widgets are created.
        MainWindow.Toolbar = self.addToolBar("Toolbar")
        MainWindow.Search_Button = QAction("Search", self)
        MainWindow.Refresh_Button = QAction("Refresh", self)
        MainWindow.Cancel_Button = QAction("Cancel", self)
        MainWindow.Save_Button = QAction("Save Articles", self)
        MainWindow.Load_Button = QAction("Load Articles", self)
        MainWindow.Filter_Button = QAction("Filter", self)
        MainWindow.Open_Config_Button = QAction("Open Config", self)
        MainWindow.Filter_Bar = QLineEdit(self)
        # Category drop-down: first topic categories, then country editions.
        MainWindow.Categories = QComboBox(self)
        MainWindow.Categories.addItem("Default")
        MainWindow.Categories.addItem("All")
        MainWindow.Categories.addItem("Business")
        MainWindow.Categories.addItem("Entertainment")
        MainWindow.Categories.addItem("Gaming")
        MainWindow.Categories.addItem("Health")
        MainWindow.Categories.addItem("Music")
        MainWindow.Categories.addItem("Politics")
        MainWindow.Categories.addItem("Science")
        MainWindow.Categories.addItem("Sport")
        MainWindow.Categories.addItem("Technology")
        MainWindow.Categories.addItem("Argentina")
        MainWindow.Categories.addItem("Australia")
        MainWindow.Categories.addItem("Brazil")
        MainWindow.Categories.addItem("Canada")
        MainWindow.Categories.addItem("China")
        MainWindow.Categories.addItem("France")
        MainWindow.Categories.addItem("Germany")
        MainWindow.Categories.addItem("India")
        MainWindow.Categories.addItem("Ireland")
        MainWindow.Categories.addItem("Israel")
        MainWindow.Categories.addItem("Italy")
        MainWindow.Categories.addItem("Netherlands")
        MainWindow.Categories.addItem("Norway")
        MainWindow.Categories.addItem("Pakistan")
        MainWindow.Categories.addItem("Russia")
        MainWindow.Categories.addItem("Saudi Arabia")
        MainWindow.Categories.addItem("South Africa")
        MainWindow.Categories.addItem("Spain")
        MainWindow.Categories.addItem("Sweden")
        MainWindow.Categories.addItem("United Kingdom")
        MainWindow.Categories.addItem("United States")
        # Sort mode drop-down ("top" vs "latest" articles per source).
        MainWindow.SortBy = QComboBox(self)
        MainWindow.SortBy.addItem("Top")
        MainWindow.SortBy.addItem("Latest")
        MainWindow.ProgressBar = QProgressBar(self)
        # Buttons and widgets are connected to their class methods.
        MainWindow.Search_Button.triggered.connect(MainWindow.search_button_handler)
        MainWindow.Refresh_Button.triggered.connect(MainWindow.refresh_event_handler)
        MainWindow.Cancel_Button.triggered.connect(MainWindow.terminate_event_handler)
        MainWindow.Save_Button.triggered.connect(MainWindow.save_button_handler)
        MainWindow.Load_Button.triggered.connect(MainWindow.load_button_handler)
        MainWindow.Filter_Button.triggered.connect(MainWindow.filter_button_handler)
        MainWindow.Open_Config_Button.triggered.connect(MainWindow.open_config_button_event_handler)
        MainWindow.Categories.activated[str].connect(MainWindow.categories_event_handler)
        MainWindow.SortBy.activated[str].connect(MainWindow.sort_by_event_handler)
        # Widgets are added to the toolbar (left to right in this order).
        MainWindow.Toolbar.addWidget(MainWindow.Categories)
        MainWindow.Toolbar.addWidget(MainWindow.SortBy)
        MainWindow.Toolbar.addAction(MainWindow.Search_Button)
        MainWindow.Toolbar.addAction(MainWindow.Refresh_Button)
        MainWindow.Toolbar.addAction(MainWindow.Cancel_Button)
        MainWindow.Toolbar.addWidget(MainWindow.ProgressBar)
        MainWindow.Toolbar.addAction(MainWindow.Filter_Button)
        MainWindow.Toolbar.addWidget(MainWindow.Filter_Bar)
        MainWindow.Toolbar.addAction(MainWindow.Save_Button)
        MainWindow.Toolbar.addAction(MainWindow.Load_Button)
        MainWindow.Toolbar.addAction(MainWindow.Open_Config_Button)
        # Textbox is created as the CentralWidget; it renders article HTML
        # and opens clicked links in the external browser.
        MainWindow.Textbox = QTextBrowser(self)
        self.setCentralWidget(MainWindow.Textbox)
        MainWindow.Textbox.setReadOnly(True)
        MainWindow.Textbox.setOpenExternalLinks(True)
        MainWindow.Textbox.insertHtml("<b> NewsFeed 1.0.0 (64-bit) </b>")
        MainWindow.Textbox.insertPlainText("\n")
        MainWindow.Textbox.insertHtml("<b> Prithvi R, powered by News API </b>")
        MainWindow.Textbox.insertPlainText("\n")
        MainWindow.Textbox.insertHtml("<b>" + MainWindow.executable_directory + "</b>")
        MainWindow.Textbox.insertPlainText("\n")
        MainWindow.Textbox.insertPlainText("")
        MainWindow.Textbox.insertPlainText("\n")
        # Statusbar is added.
        MainWindow.StatusBar = self.statusBar()
        # MainWindow is displayed to the screen.
        self.show()
        # Console banner output.
        print("=============================================")
        print("NewsFeed 1.0.0 (64-bit)")
        print("Prithvi R, powered by News API")
        print("<28/12/2017>")
        print(MainWindow.executable_directory)
        print("=============================================")
        # Configuration file is loaded (API key, news sources).
        MainWindow.load_configuration_file()
# Method is activated once the "Open Config" button is pressed.
def open_config_button_event_handler():
"""This method is activated once the "Config" button is pressed. This function opens the default text editor
on the user's system using the subprocess module to edit the configuration file."""
print("<GUI Thread Process: Open Configuration File>")
config_file = subprocess.Popen(["notepad.exe", (MainWindow.working_directory + "\Config.txt")])
config_file.wait()
MainWindow.load_configuration_file()
# Method is activated once the "Search" button is pressed.
def search_button_handler():
"""This method is activated once the "Search" button is pressed. This function disables all buttons
excluding the "Cancel" button. This function also starts the SearchArticlesThread."""
print("<GUI Thread Process: Search Articles Thread Active>")
MainWindow.Search_Button.setEnabled(False)
MainWindow.Refresh_Button.setEnabled(False)
MainWindow.SortBy.setEnabled(False)
MainWindow.Categories.setEnabled(False)
MainWindow.Filter_Button.setEnabled(False)
MainWindow.Save_Button.setEnabled(False)
MainWindow.Load_Button.setEnabled(False)
MainWindow.Open_Config_Button.setEnabled(False)
MainWindow.SearchArticlesThread.start()
# Method is activated once the "Save Articles" button is pressed.
def save_button_handler():
"""This method is activated once the "Save Articles" button is pressed. This function disables all buttons
excluding the "Cancel" button. This function also starts the SaveArticlesThread."""
print("<GUI Thread Process: Save Articles Thread Active>")
MainWindow.SaveArticlesThread.start()
MainWindow.Search_Button.setEnabled(False)
MainWindow.Refresh_Button.setEnabled(False)
MainWindow.Categories.setEnabled(False)
MainWindow.SortBy.setEnabled(False)
MainWindow.Filter_Button.setEnabled(False)
MainWindow.Save_Button.setEnabled(False)
MainWindow.Load_Button.setEnabled(False)
MainWindow.Open_Config_Button.setEnabled(False)
# Method is activated once the "Load" button is pressed.
def load_button_handler():
"""This method is activated once the "Load" button is pressed. The user enters a directory through
a file browser, the method loads all JSON files in the choosen directory and displays the articles
contained in the JSON files to the GUI."""
#Console output.
print("<GUI Thread Process: Load Articles>")
#User inputs directory.
try:
directory = str(QFileDialog.getExistingDirectory())
os.chdir(directory)
except OSError:
print("<GUI Thread Process: Error: No Directory Choosen>")
#Clear textbox.
MainWindow.clear_textbox()
#Reset article list.
MainWindow.article_list = []
MainWindow.selected_articles = []
#Articles in the current directory are displayed.
for news_source in MainWindow.news_sources:
# File name is defined.
file_name = (news_source + "-" + MainWindow.sort_by_var + ".json")
try:
# File is opened and the data is loaded.
json_file = open(file_name, "r")
data = json.load(json_file)
# Displays articles in the data to the GUI.
for article in data:
MainWindow.article_list.append(article["ID"])
MainWindow.selected_articles.append(article["ID"])
MainWindow.Textbox.insertHtml("<h2>" + article["Title"] + "</h2>")
MainWindow.Textbox.insertPlainText("\n")
if article["Description"] is "":
pass
elif article["Description"] is None:
pass
else:
MainWindow.Textbox.insertHtml(article["Description"])
MainWindow.Textbox.insertPlainText("\n")
link = ("<a href= " + article["URL"] + ">" + article["URL"] + " </a>")
MainWindow.Textbox.insertHtml(link)
MainWindow.Textbox.insertPlainText("\n")
MainWindow.Textbox.insertPlainText("")
MainWindow.Textbox.insertPlainText("\n")
# File is closed.
json_file.close()
# Error code.
except:
print("<GUI Thread Process: Error: Could Not Find " + file_name + ">")
# Method is activated once the "Filter" button is pressed.
def filter_button_handler():
"""Finds keywords and phrases in the news articles that are displayed to the GUI.
This method searches through the JSON files in the current directory, it identifies
the articles in the JSON files which contain the keyword or phrase. It then
displays these articles to the GUI."""
# Valid characters.
valid_characters = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d",
"e", "f", "g", "h", "i", "j", "k", "i", "m", "n", "o", "p", "q",
"r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D",
"E", "F", "G", "H", "I", "J", "K", "I", "M", "N", "O", "P", "Q",
"R", "S", "T", "U", "V", "W", "X", "Y", "Z", "-", "!", "?", "!"
":", ";", "@", ",", ".", "#", "/", ">", "<", "(", ")", "[", "]"]
string = MainWindow.Filter_Bar.text()
valid_string = False
for character in string:
if character in valid_characters:
valid_string = True
# The Searching algorithm.
if valid_string is True:
# Textbox is cleared.
MainWindow.clear_textbox()
# Selected articles list is reset.
MainWindow.selected_articles = []
print("<GUI Thread Process: Searched For: " + string + ">")
word_found = False
for news_source in MainWindow.news_sources:
# File name is defined.
file_name = (news_source + "-" + MainWindow.sort_by_var + ".json")
try:
# File is opened and the data is loaded.
json_file = open(file_name, "r")
data = json.load(json_file)
# Keyword or phrase is found.
for article in data:
if string in article["Title"]:
word_found = True
MainWindow.selected_articles.append(article["ID"])
if string in article["Description"]:
word_found = True
MainWindow.selected_articles.append(article["ID"])
# File is closed.
json_file.close()
# Error code.
except:
print("<GUI Thread Process: Error: " + file_name + " Not Found>")
# Remove duplicates in selected articles list.
MainWindow.selected_articles = list(set(MainWindow.selected_articles))
# Selected articles are displayed to the GUI.
for news_source in MainWindow.news_sources:
# File name is defined.
file_name = (news_source + "-" + MainWindow.sort_by_var + ".json")
try:
# File is opened and the data is loaded.
json_file = open(file_name, "r")
data = json.load(json_file)
# Contents of the news article is | |
#
# Loxodo -- Password Safe V3 compatible Password Vault
# Copyright (C) 2008 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import hashlib
import struct
from hmac import HMAC
import random
import os
import tempfile
import time
import uuid
from .twofish.twofish_ecb import TwofishECB
from .twofish.twofish_cbc import TwofishCBC
class Vault(object):
"""
Represents a collection of password Records in PasswordSafe V3 format.
The on-disk represenation of the Vault is described in the following file:
http://passwordsafe.svn.sourceforge.net/viewvc/passwordsafe/trunk/pwsafe/pwsafe/docs/formatV3.txt?revision=2139
"""
def __init__(self, password, filename=None):
self.f_tag = None
self.f_salt = None
self.f_iter = None
self.f_sha_ps = None
self.f_b1 = None
self.f_b2 = None
self.f_b3 = None
self.f_b4 = None
self.f_iv = None
self.f_hmac = None
self.header = self.Header()
self.records = []
if not filename:
self._create_empty(password)
else:
self._read_from_file(filename, password)
    class BadPasswordError(RuntimeError):
        """Raised when the passphrase's stretched hash does not match the stored H(P')."""
        pass
    class VaultFormatError(RuntimeError):
        """Raised when the on-disk data does not conform to the PasswordSafe V3 format."""
        pass
    class VaultVersionError(VaultFormatError):
        """Raised when the file's magic tag is not that of a PasswordSafe V3 file."""
        pass
    class Field(object):
        """
        Contains the raw, on-disk representation of a record's field.
        """
        def __init__(self, raw_type, raw_len, raw_value):
            # Numeric field-type ID as defined by the PasswordSafe V3 format.
            self.raw_type = raw_type
            # Length of the unpadded field value.
            self.raw_len = raw_len
            # The field value itself (padding already stripped).
            self.raw_value = raw_value
        def is_equal(self, field):
            """
            Return True if this Field and the given one are of the same type and both contain the same value.
            """
            return self.raw_type == field.raw_type and self.raw_value == field.raw_value
    class Header(object):
        """
        Contains the fields of a Vault header.
        """
        def __init__(self):
            # Maps raw field type -> Vault.Field; one field per type.
            self.raw_fields = {}
        def add_raw_field(self, raw_field):
            """Store *raw_field*, replacing any previous field of the same type."""
            self.raw_fields[raw_field.raw_type] = raw_field
class Record(object):
"""
Contains the fields of an individual password record.
"""
def __init__(self):
self.raw_fields = {}
self._uuid = None
self._group = ""
self._title = ""
self._user = ""
self._notes = ""
self._passwd = ""
self._last_mod = 0
self._url = ""
@staticmethod
def create():
record = Vault.Record()
record.uuid = uuid.uuid4()
record.last_mod = int(time.time())
return record
def add_raw_field(self, raw_field):
self.raw_fields[raw_field.raw_type] = raw_field
if (raw_field.raw_type == 0x01):
self._uuid = uuid.UUID(bytes_le=raw_field.raw_value)
if (raw_field.raw_type == 0x02):
self._group = raw_field.raw_value.decode('utf_8', 'replace')
if (raw_field.raw_type == 0x03):
self._title = raw_field.raw_value.decode('utf_8', 'replace')
if (raw_field.raw_type == 0x04):
self._user = raw_field.raw_value.decode('utf_8', 'replace')
if (raw_field.raw_type == 0x05):
self._notes = raw_field.raw_value.decode('utf_8', 'replace')
if (raw_field.raw_type == 0x06):
self._passwd = raw_field.raw_value.decode('utf_8', 'replace')
if ((raw_field.raw_type == 0x0c) and (raw_field.raw_len == 4)):
self._last_mod = struct.unpack("<L", raw_field.raw_value)[0]
if (raw_field.raw_type == 0x0d):
self._url = raw_field.raw_value.decode('utf_8', 'replace')
def mark_modified(self):
self.last_mod = int(time.time())
# TODO: refactor Record._set_xyz methods to be less repetitive
def _get_uuid(self):
return self._uuid
def _set_uuid(self, value):
self._uuid = value
raw_id = 0x01
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, 0, "")
self.raw_fields[raw_id].raw_value = value.bytes_le
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
self.mark_modified()
def _get_group(self):
return self._group
def _set_group(self, value):
self._group = value
raw_id = 0x02
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, len(value), value)
self.raw_fields[raw_id].raw_value = value.encode('utf_8', 'replace')
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
self.mark_modified()
def _get_title(self):
return self._title
def _set_title(self, value):
self._title = value
raw_id = 0x03
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, len(value), value)
self.raw_fields[raw_id].raw_value = value.encode('utf_8', 'replace')
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
self.mark_modified()
def _get_user(self):
return self._user
def _set_user(self, value):
self._user = value
raw_id = 0x04
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, len(value), value)
self.raw_fields[raw_id].raw_value = value.encode('utf_8', 'replace')
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
self.mark_modified()
def _get_notes(self):
return self._notes
def _set_notes(self, value):
self._notes = value
raw_id = 0x05
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, len(value), value)
self.raw_fields[raw_id].raw_value = value.encode('utf_8', 'replace')
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
self.mark_modified()
def _get_passwd(self):
return self._passwd
def _set_passwd(self, value):
self._passwd = value
raw_id = 0x06
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, len(value), value)
self.raw_fields[raw_id].raw_value = value.encode('utf_8', 'replace')
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
self.mark_modified()
def _get_last_mod(self):
return self._last_mod
def _set_last_mod(self, value):
assert type(value) == int
self._last_mod = value
raw_id = 0x0c
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, 0, "0")
self.raw_fields[raw_id].raw_value = struct.pack("<L", value)
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
def _get_url(self):
return self._url
def _set_url(self, value):
self._url = value
raw_id = 0x0d
if (not self.raw_fields.has_key(raw_id)):
self.raw_fields[raw_id] = Vault.Field(raw_id, len(value), value)
self.raw_fields[raw_id].raw_value = value.encode('utf_8', 'replace')
self.raw_fields[raw_id].raw_len = len(self.raw_fields[raw_id].raw_value)
self.mark_modified()
def is_corresponding(self, record):
"""
Return True if Records are the same, based on either UUIDs (if available) or title
"""
if not self.uuid or not record.uuid:
return self.title == record.title
return self.uuid == record.uuid
def is_newer_than(self, record):
"""
Return True if this Record's last modifed date is later than the given one's.
"""
return self.last_mod > record.last_mod
def merge(self, record):
"""
Merge in fields from another Record, replacing existing ones
"""
self.raw_fields = {}
for field in record.raw_fields.values():
self.add_raw_field(field)
uuid = property(_get_uuid, _set_uuid)
group = property(_get_group, _set_group)
title = property(_get_title, _set_title)
user = property(_get_user, _set_user)
notes = property(_get_notes, _set_notes)
passwd = property(_get_passwd, _set_passwd)
last_mod = property(_get_last_mod, _set_last_mod)
url = property(_get_url, _set_url)
@staticmethod
def _stretch_password(password, salt, iterations):
"""
Generate the SHA-256 value of a password after several rounds of stretching.
The algorithm is described in the following paper:
[KEYSTRETCH Section 4.1] http://www.schneier.com/paper-low-entropy.pdf
"""
sha = hashlib.sha256()
sha.update(password)
sha.update(salt)
stretched_password = <PASSWORD>()
for dummy in range(iterations):
stretched_password = hashlib.sha256(stretched_password).digest()
return stretched_password
    def _read_field_tlv(self, filehandle, cipher):
        """
        Return one field of a vault record by reading from the given file handle.

        Fields are stored in encrypted 16-byte blocks: the first block holds
        a 4-byte little-endian length, a 1-byte type and up to 11 value
        bytes; longer values continue in further whole blocks. Returns None
        at the end-of-data sentinel, raises VaultFormatError on a short read.
        NOTE(review): the sentinel comparison and data[4] indexing assume
        Python 2 str semantics; under Python 3 'rb' reads this would need
        b"PWS3-EOFPWS3-EOF" and data[4:5] -- confirm the target runtime.
        """
        data = filehandle.read(16)
        if (not data) or (len(data) < 16):
            raise self.VaultFormatError("EOF encountered when parsing record field")
        # The end-of-records marker is stored in plaintext, so it is tested
        # before decrypting.
        if data == "PWS3-EOFPWS3-EOF":
            return None
        data = cipher.decrypt(data)
        raw_len = struct.unpack("<L", data[0:4])[0]
        raw_type = struct.unpack("<B", data[4])[0]
        raw_value = data[5:]
        # Values longer than the 11 bytes left in the first block span
        # (raw_len+4)//16 additional blocks.
        if (raw_len > 11):
            for dummy in range((raw_len+4)//16):
                data = filehandle.read(16)
                if (not data) or (len(data) < 16):
                    raise self.VaultFormatError("EOF encountered when parsing record field")
                raw_value += cipher.decrypt(data)
        # Trim the random padding that was appended at write time.
        raw_value = raw_value[:raw_len]
        return self.Field(raw_type, raw_len, raw_value)
@staticmethod
def _urandom(count):
try:
return os.urandom(count)
except NotImplementedError:
retval = ""
for dummy in range(count):
retval += struct.pack("<B", random.randint(0, 0xFF))
return retval
    def _write_field_tlv(self, filehandle, cipher, field):
        """
        Write one field of a vault record using the given file handle.

        A *field* of None writes the plaintext end-of-data sentinel instead.
        The TLV block (4-byte LE length, 1-byte type, value) is padded with
        random bytes up to a whole number of 16-byte cipher blocks before
        encryption; _read_field_tlv trims the padding back off via raw_len.
        """
        if (field is None):
            # End-of-records marker is stored unencrypted.
            filehandle.write("PWS3-EOFPWS3-EOF")
            return
        assert len(field.raw_value) == field.raw_len
        raw_len = struct.pack("<L", field.raw_len)
        raw_type = struct.pack("<B", field.raw_type)
        raw_value = field.raw_value
        # Assemble TLV block and pad to 16-byte boundary
        data = raw_len + raw_type + raw_value
        if (len(data) % 16 != 0):
            pad_count = 16 - (len(data) % 16)
            # Random padding; the true length is recorded in raw_len.
            data += self._urandom(pad_count)
        data = cipher.encrypt(data)
        filehandle.write(data)
@staticmethod
def create(password, filename):
vault = Vault(password)
vault.write_to_file(filename, password)
pass
    def _create_empty(self, password):
        """
        Initialize this Vault as a brand-new, empty safe keyed by *password*.

        Derives the stretched key P' from password+salt, stores H(P'),
        creates the wrapped random keys B1..B4 (K = unwrap(B1)+unwrap(B2),
        L = unwrap(B3)+unwrap(B4)) and computes the HMAC (keyed with L)
        over the -- currently empty -- record data.
        """
        # Python 2 byte string expected; key stretching works on raw bytes.
        assert type(password) != unicode
        self.f_tag = 'PWS3'
        self.f_salt = Vault._urandom(32)
        # 2048 stretch iterations, matching the value used on write.
        self.f_iter = 2048
        stretched_password = self._stretch_password(password, self.f_salt, self.f_iter)
        self.f_sha_ps = hashlib.sha256(stretched_password).digest()
        cipher = TwofishECB(stretched_password)
        # B1..B4: four random 16-byte blocks stored encrypted under P'.
        self.f_b1 = cipher.encrypt(Vault._urandom(16))
        self.f_b2 = cipher.encrypt(Vault._urandom(16))
        self.f_b3 = cipher.encrypt(Vault._urandom(16))
        self.f_b4 = cipher.encrypt(Vault._urandom(16))
        key_k = cipher.decrypt(self.f_b1) + cipher.decrypt(self.f_b2)
        key_l = cipher.decrypt(self.f_b3) + cipher.decrypt(self.f_b4)
        self.f_iv = Vault._urandom(16)
        hmac_checker = HMAC(key_l, "", hashlib.sha256)
        cipher = TwofishCBC(key_k, self.f_iv)
        # No records yet
        self.f_hmac = hmac_checker.digest()
def _read_from_file(self, filename, password):
"""
Initialize all class members by loading the contents of a Vault stored in the given file.
"""
assert type(password) != unicode
filehandle = file(filename, 'rb')
# read boilerplate
self.f_tag = filehandle.read(4) # TAG: magic tag
if (self.f_tag != 'PWS3'):
raise self.VaultVersionError("Not a PasswordSafe V3 file")
self.f_salt = filehandle.read(32) # SALT: SHA-256 salt
self.f_iter = struct.unpack("<L", filehandle.read(4))[0] # ITER: SHA-256 keystretch iterations
stretched_password = self._stretch_password(password, self.f_salt, self.f_iter) # P': the stretched key
my_sha_ps = hashlib.sha256(stretched_password).digest()
self.f_sha_ps = filehandle.read(32) # H(P'): SHA-256 hash of stretched passphrase
if (self.f_sha_ps != my_sha_ps):
raise self.BadPasswordError("Wrong password")
self.f_b1 = filehandle.read(16) # B1
self.f_b2 = filehandle.read(16) # B2
self.f_b3 = filehandle.read(16) # B3
self.f_b4 = filehandle.read(16) # B4
cipher = TwofishECB(stretched_password)
key_k = cipher.decrypt(self.f_b1) + cipher.decrypt(self.f_b2)
key_l = cipher.decrypt(self.f_b3) + cipher.decrypt(self.f_b4)
self.f_iv = filehandle.read(16) # IV: initialization vector of Twofish CBC
hmac_checker = HMAC(key_l, "", hashlib.sha256)
cipher = TwofishCBC(key_k, self.f_iv)
# read header
while (True):
field = self._read_field_tlv(filehandle, cipher)
if not field:
break
if field.raw_type | |
# Repository: BorgesGabo/gaia
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
# -------------------------------------------------------------------------
# This is a sample controller
# - index is the default action of any application
# - user is required for authentication and authorization
# - download is for downloading files uploaded in the db (does streaming)
# -------------------------------------------------------------------------
import datetime
from prettytable import PrettyTable
from prettytable import ALL
def process_po():
    """Search form + report: purchase orders between two dates.

    Builds a datetime-range form (SQLFORM.factory), queries the join of
    db.po, db.po_detail and db.product, prints the consolidated table via
    ABCD() and returns the form, a result-count message and the rows.
    Adapted from http://brunorocha.org/python/web2py/search-form-with-web2py.html
    """
    # Defaults keep the submitted dates in the form after a POST; set both
    # to None if the form should always come back empty.
    # Fixed: the guard tested request.vars.date_inicial (typo), so the
    # initial-date default was never preserved.
    date_initial_default = \
        datetime.datetime.strptime(request.vars.date_initial, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_initial else None
    date_final_default = \
        datetime.datetime.strptime(request.vars.date_final, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_final else None
    # The search form created with .factory
    form = SQLFORM.factory(
        Field("date_initial", "datetime", default=date_initial_default),
        Field("date_final", "datetime", default=date_final_default),
        formstyle='divs',
        submit_button="Search",
    )
    # The base query: join of db.po, db.po_detail and db.product.
    query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    # Testing if the form was accepted.
    if form.process().accepted:
        # Gathering form submitted values.
        date_initial = form.vars.date_initial
        date_final = form.vars.date_final
        # Narrow the query to the submitted range (either bound may be empty).
        if date_initial:
            query &= db.po.date >= date_initial
        if date_final:
            query &= db.po.date <= date_final
    count = db(query).count()
    results = db(query).select(db.po.po_number,db.po.date,db.po_detail.product_id,db.po_detail.quantity,db.product.pres, db.po.customer_id, orderby='po_number')
    msg = T("%s registros encontrados" % count )
    ABCD(query)
    return dict(form=form, msg=msg, results=results)
def i_file(): # builds the path and parses a JSON file without using the upload form
    """Debug helper: load 'orders1.json' from a hard-coded Windows folder and
    print its type, length and second element.

    NOTE(review): the folder literal relies on Python 2 leaving unknown
    escapes such as "\\U" and "\\S" as literal backslashes; the print
    statements below already require Python 2 -- confirm before porting.
    """
    import sys
    import json
    import re
    file_name='orders1.json'
    uploadfolder='C:\Users\Sandra\Documents\Herbert\Projecto web2py'
    #form=SQLFORM.factory(Field('json_file', 'upload', uploadfolder=os.path.join(request.folder, 'uploads'),label='esta el laberl'))
    path=('%s\%s' % (uploadfolder, file_name))
    print path
    print str('path is a type of:')
    print type(path)
    '''if form.process().accepted:
    #file_name=form.vars.json_file
    #path=('%s\%s' % (os.path.join(request.folder,'uploads'), file_name))
    path=('%s\%s' % (uploadfolder, file_name))
    #redirect(URL('printme', args=(path)))
    redirect(URL('form1'))
    printme(str(path))
    return dict(form=form )'''
    with open(path) as json_file:
        datos=json.load(json_file)
    print (type(datos))
    print (len(datos))
    print datos[1]
    return
def printme(str):
    """Echo helper: print the given text and return it wrapped in a dict.

    NOTE(review): the parameter is named ``str`` and shadows the builtin
    inside this function; renaming it would change the keyword-argument
    interface, so it is only flagged here.
    """
    #str="hello there"
    print str
    return dict(str=str)
def gameTime():
    # NOTE(review): `path` is not defined in this module's scope, so calling
    # this action raises NameError before 'OK' is returned; this looks like
    # leftover debug code -- confirm before relying on it.
    print path
    return 'OK'
def display_form():
    """Edit (or delete) the db.wp record selected by the URL argument.

    Renders an SQLFORM bound to the record given by request.args(0), flashes
    a confirmation or error message after processing, and returns the form.
    """
    wp_record = db.wp(request.args(0))
    edit_form = SQLFORM(db.wp, wp_record, deletable=True,
                        upload=URL('download'))
    if edit_form.process().accepted:
        response.flash = 'form accepted'
    elif edit_form.errors:
        response.flash = 'form has errors'
    return dict(form=edit_form)
def download():
    """Stream a file previously uploaded into the database (web2py's
    response.download handles the streaming, per the header comment above)."""
    return response.download(request, db)
def up_json():
    """Render an upload/insert form for the db.wp table."""
    return dict(form=SQLFORM(db.wp))
def ABCD(query):
    """Print, and write to consolidado.txt, a product-by-order summary table.

    For every distinct product matched by *query*, one row is emitted with
    quantity * presentation ("pres") per purchase order plus a row total,
    rendered with PrettyTable.
    """
    # Runs standalone with: c:\Python27\python.exe c:\web2py\web2py.py -S EssenciaAPI24/default/ABCD
    b_lst=[] #list of b values (per-order subtotals)
    c_lst=[] #list of c values (per-product totals) -- NOTE(review): unused below
    qty_lst=[] #list of quantities
    pres_lst=[] #list of presentations
    #**************************************BASE QUERY **************************************
    #defines the base query -> DAL > query
    '''query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    query &= db.po.po_number<2432''' #remove the quotes to test from the command line
    orders_query_lst=db(query).select(db.po.id, db.po.po_number, groupby='po.po_number').as_list() #gets the ids of the orders in the query
    n=len(orders_query_lst) #number of orders in the query
    d_lst=[str(x['po_number'])+'|Recibido' for x in orders_query_lst] #order references for the query
    #print orders_query_lst #debug print
    print '\n'
    #print d_lst #debug print
    #***************************************QUERY A,B *****************************************
    a_product_id_lst=db(query).select(db.product.id, db.product.name, groupby='product.name').as_list() # distinct product ids/names in the query
    for i in range (len(a_product_id_lst)): # iterating over A: a_products_lst
        query_a = query
        query_a &= db.product.id==a_product_id_lst[i]['id']
        for j in range (n): # iterating over orders_query_lst
            query_b = query_a
            query_b &= db.po.id ==orders_query_lst[j]['id']
            #print query_b # debug print
            bj_lst = db(query_b).select(db.po_detail.quantity, orderby='po.po_number', groupby='po.po_number').as_list() #gets the quantity
            qtyj_lst = db(query_b).select(db.po_detail.quantity, orderby='po.po_number', groupby='po.po_number').as_list() #gets the quantity
            presj_lst =db(query_b).select(db.product.pres, orderby='po.po_number', groupby='po.po_number').as_list() #gets the presentation
            if len(bj_lst)==0: #if the order contains none of this product, store 0
                bj_lst = 0
                b_lst.append(0)
            else:
                b_lst.append(int(bj_lst[0]['quantity'])) # otherwise store the bj_lst value
            if len(qtyj_lst)==0: #if there is no quantity for that order, store a zero
                qtyj_lst=0
                presj_lst=0 #zero for the presentation as well
                qty_lst.append(0) #append to the quantity list
                pres_lst.append(0) #append to the presentation list
            else: # otherwise take the values returned by the queries
                qty_lst.append(int(qtyj_lst[0]['quantity'])) # numeric quantity
                pres_lst.append(int(presj_lst[0]['pres'])) # numeric product presentation
    #print qty_lst #debug print
    #print pres_lst #debug print
    z_lst=[]
    z_lst=[qty_lst*pres_lst for qty_lst,pres_lst in zip(qty_lst,pres_lst)] #computes pres*qty for every element pair
    #print z_lst
    #print (str('j is:'), j) #debug print
    #print (str('bj_lst is:'), bj_lst) #debug print
    #print (str('b_lst is:'), b_lst) #debug print
    #************************************* PRINT SUMMARY TABLE **************************************
    a_product_name_lst=db(query).select(db.product.name, groupby='product.name').as_list() #distinct product names within the range
    field_names_lst=d_lst #list holding every order number within the range
    field_names_lst.insert(0, "Producto") # adds the product column title at the start
    field_names_lst.insert(len(field_names_lst),"Total") # adds the total column title at the end
    summary_table=PrettyTable(field_names_lst) # builds the summary table with the column titles
    total_lst=[]
    for y in range (0,len(a_product_id_lst)):
        begining_slice=y*n #start of this product's slice of z_lst
        end_slice=begining_slice+n #end of the slice (exclusive)
        row_summary_lst=z_lst[begining_slice:end_slice] #takes the totals between the slice bounds
        #for raw order quantities (not multiplied by weights) use b_lst instead of z_lst
        total=sum(row_summary_lst) #sums the quantities of every order in the range
        row_summary_lst.insert(0,a_product_name_lst[y]['name']) #adds the product name at the start of the row
        row_summary_lst.insert(len(row_summary_lst),total) # adds the total at the end of the row
        summary_table.add_row(row_summary_lst) # adds the row to the table
    summary_table.align='l'
    #summary_table.align['Producto']='l' # left-align the first column
    summary_table.align['Total']='r' # right-align the last column
    print summary_table # prints the summary table
    with open ('consolidado.txt','w') as w: # writes the table to a txt file
        w.write(str('ESTE ES EL CONSOLIDADO DE LOS SIGUIENTES PEDIDOS:'))
        w.write('\n')
        w.write(str(summary_table))
    return
def table():
    """Build a demo PrettyTable of Australian cities, write it to
    'la_tabla.txt' and print it to stdout.

    Returns None. (Python 2 module: note the ``print`` statement below.)
    """
    pt = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"])
    pt.align["City name"] = "l"  # Left align city names
    pt.padding_width = 1  # One space between column edges and contents (default)
    pt.add_row(["Adelaide",1295, 1158259, 600.5])
    pt.add_row(["Brisbane",5905, 1857594, 1146.4])
    pt.add_row(["Darwin", 112, 120900, 1714.7])
    pt.add_row(["Hobart", 1357, 205556, 619.5])
    pt.add_row(["Sydney", 2058, 4336374, 1214.8])
    pt.add_row(["Melbourne", 1566, 3806092, 646.9])
    pt.add_row(["Perth", 5386, 1554769, 869.4])
    # NOTE(review): `lines` is unused - the file write below uses str(pt) instead.
    lines = pt.get_string()
    with open ('la_tabla.txt','w') as w:
        w.write(str(pt))
    print pt
    return
def merger():
    """web2py controller action: consolidate the POs in a date range into
    total quantities per product.

    Renders a date-range search form; the base query joins po, po_detail and
    product. Returns a dict for the view with the joined rows, counts, the
    distinct products (A) and per-product subtotals.

    FIX: the guard for ``date_initial_default`` previously tested the
    misspelled ``request.vars.date_inicial`` (always None in web2py's
    request Storage), so a supplied initial date never pre-populated the
    form. It now tests ``request.vars.date_initial``, matching the value
    actually parsed on the line above.
    """
    # 1.0 default dates taken from the request, used to pre-populate the form
    date_initial_default = \
        datetime.datetime.strptime(request.vars.date_initial, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_initial else None
    date_final_default = \
        datetime.datetime.strptime(request.vars.date_final, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_final else None
    # 1.1 the search form created with SQLFORM.factory
    form = SQLFORM.factory(
        Field("date_initial", "datetime", default=date_initial_default),
        Field("date_final", "datetime", default=date_final_default),
        formstyle='divs',
        submit_button="Search",
    )
    # 1.2 base query joining po -> po_detail -> product
    query = db.po.id == db.po_detail.po_id
    query &= db.po_detail.product_id == db.product.id
    # 1.3 narrow the query with the submitted dates, if any
    if form.process().accepted:
        date_initial = form.vars.date_initial
        date_final = form.vars.date_final
        if date_initial:
            query &= db.po.date >= date_initial
        if date_final:
            query &= db.po.date <= date_final
    # 1.4 / 1.5 count and fetch the joined rows
    count = db(query).count()
    results = db(query).select(db.po.po_number, db.po.date, db.po_detail.product_id,
                               db.po_detail.quantity, db.product.pres, db.po.customer_id,
                               orderby='po_number')
    # 1.6 message with the number of results
    msg = T("%s registers" % count)
    # 2. all the products contained within the selected orders
    A = db(query).select(db.product.id, groupby='product.name')
    A_rows = A.as_list()
    count2 = len(A_rows)
    msg2 = T("%s registers" % count2)
    # 3.1 first product id from A
    # NOTE(review): raises IndexError when the range matches no rows - confirm
    # whether an empty result should be handled before this point.
    Ai = A_rows[0]['id']
    # 3.2 all the po ids in the range of dates
    orders = db(query).select(db.po.id, orderby='po.po_number', groupby='po.po_number').as_list()
    for a in orders:
        i = a['id']
        # NOTE(review): indexing `orders` (a list) by a database record id
        # rather than a list position looks wrong and can raise IndexError;
        # kept as-is pending confirmation of intent.
        Bj = orders[i]['id']
        query_B = query
        # 3.4 total quantity for product Ai
        query_B &= db.po_detail.product_id == Ai
        Bijs = db(query_B).select(db.product.pres * db.po_detail.quantity, groupby='product.name')
    # NOTE(review): `i` and `Bijs` are undefined when `orders` is empty.
    return dict(results=results, msg=msg, form=form, A=A, msg2=msg2, Ai=Ai, Bijs=Bijs, orders=orders, i=i)
def iterate():
| |
<gh_stars>0
import datetime
import json
import logging
import time
from typing import Any, Dict, List, cast
from urllib.parse import urljoin
import requests
from dagster import (
EventMetadata,
Failure,
Field,
StringSource,
__version__,
check,
get_dagster_logger,
resource,
)
from dagster.utils.merger import deep_merge_dicts
from requests.exceptions import RequestException
from .types import DbtCloudOutput
DBT_DEFAULT_HOST = "https://cloud.getdbt.com/"
DBT_ACCOUNTS_PATH = "api/v2/accounts/"
# default polling interval (in seconds)
DEFAULT_POLL_INTERVAL = 10
class DbtCloudResourceV2:
"""This class exposes methods on top of the dbt Cloud REST API v2.
For a complete set of documentation on the dbt Cloud Administrative REST API, including expected
response JSON schemae, see the `dbt Cloud API Docs <https://docs.getdbt.com/dbt-cloud/api-v2>`_.
"""
def __init__(
    self,
    auth_token: str,
    account_id: int,
    disable_schedule_on_trigger: bool = True,
    request_max_retries: int = 3,
    request_retry_delay: float = 0.25,
    dbt_cloud_host: str = DBT_DEFAULT_HOST,
    log: logging.Logger = get_dagster_logger(),
    log_requests: bool = False,
):
    """Store connection and behavior settings for the dbt Cloud API client.

    Args:
        auth_token: dbt Cloud API token, sent as a Bearer token on every request.
        account_id: Numeric dbt Cloud account id, prefixed to every endpoint path.
        disable_schedule_on_trigger: When True, run_job() first disables the
            job's dbt Cloud schedule before triggering a run.
        request_max_retries: Number of retries make_request() attempts after
            the first failed request.
        request_retry_delay: Seconds slept between retries.
        dbt_cloud_host: Base URL of the dbt Cloud instance.
        log: Logger used by all methods. NOTE: this default is evaluated once,
            at class-definition time (standard Python default-argument
            semantics), so all instances share that logger unless one is
            passed explicitly.
        log_requests: When True, make_request() logs method/url/data at DEBUG.
    """
    self._auth_token = auth_token
    self._account_id = account_id
    self._disable_schedule_on_trigger = disable_schedule_on_trigger
    self._request_max_retries = request_max_retries
    self._request_retry_delay = request_retry_delay
    self._dbt_cloud_host = dbt_cloud_host
    self._log = log
    self._log_requests = log_requests
@property
def api_base_url(self) -> str:
    """Base URL for account-scoped v2 endpoints, e.g.
    ``https://cloud.getdbt.com/api/v2/accounts/``."""
    return urljoin(self._dbt_cloud_host, DBT_ACCOUNTS_PATH)
def make_request(
    self, method: str, endpoint: str, data: Dict[str, Any] = None, return_text: bool = False
) -> Dict[str, Any]:
    """
    Creates and sends a request to the desired dbt Cloud API endpoint.

    Retries up to ``self._request_max_retries`` times on any
    ``requests.RequestException``, sleeping ``self._request_retry_delay``
    seconds between attempts, then raises ``Failure``.

    Args:
        method (str): The http method to use for this request (e.g. "POST", "GET", "PATCH").
        endpoint (str): The dbt Cloud API endpoint to send this request to.
        data (Optional[str]): JSON-formatted data string to be included in the request.
        return_text (bool): Override default behavior and return unparsed {"text": response.text}
            blob instead of json.
    Returns:
        Dict[str, Any]: Parsed json data from the response to this request
    """
    headers = {
        "User-Agent": f"dagster-dbt/{__version__}",
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self._auth_token}",
    }
    url = urljoin(self.api_base_url, endpoint)
    if self._log_requests:
        self._log.debug(f"Making Request: method={method} url={url} data={data}")
    num_retries = 0
    while True:
        try:
            response = requests.request(
                method=method,
                url=url,
                headers=headers,
                # data is always JSON-encoded: json.dumps(None) yields the
                # body "null" when no payload is supplied.
                data=json.dumps(data),
                allow_redirects=False,
            )
            # raise_for_status() raises requests.HTTPError (a subclass of
            # RequestException) on 4xx/5xx, so HTTP error statuses also go
            # through the retry path below.
            response.raise_for_status()
            return {"text": response.text} if return_text else response.json()["data"]
        except RequestException as e:
            self._log.error("Request to dbt Cloud API failed: %s", e)
            if num_retries == self._request_max_retries:
                break
            num_retries += 1
            time.sleep(self._request_retry_delay)
    raise Failure("Exceeded max number of retries.")
def get_job(self, job_id: int) -> Dict[str, Any]:
    """Fetch the details of a single dbt Cloud job.

    Args:
        job_id (int): ID of the dbt Cloud job - the final number in the job's
            URL in the dbt Cloud UI, e.g.
            ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``

    Returns:
        Dict[str, Any]: Parsed ``data`` payload from the API response.
    """
    endpoint = f"{self._account_id}/jobs/{job_id}/"
    return self.make_request("GET", endpoint)
def update_job(self, job_id: int, **kwargs) -> Dict[str, Any]:
    """Update properties of an existing dbt Cloud job.

    The dbt Cloud API requires the full job definition on update, so the
    current definition is fetched first and the keyword overrides are merged
    on top of it. Full parameter list:
    https://docs.getdbt.com/dbt-cloud/api-v2#operation/updateJobById

    Args:
        job_id (int): ID of the dbt Cloud job - the final number in the job's
            URL in the dbt Cloud UI.
        kwargs: Job properties to change.

    Returns:
        Dict[str, Any]: Parsed ``data`` payload from the API response.

    Examples:
        .. code-block:: python
            # disable schedule for job with id=12345
            my_dbt_cloud_resource.update_job(12345, triggers={"schedule": False})
    """
    current_job = self.get_job(job_id)
    merged_job = deep_merge_dicts(current_job, kwargs)
    return self.make_request("POST", f"{self._account_id}/jobs/{job_id}/", data=merged_job)
def run_job(self, job_id: int, **kwargs) -> Dict[str, Any]:
    """Trigger a run of a dbt Cloud job.

    Per-run property overrides may be supplied via kwargs; the full list is at
    https://docs.getdbt.com/dbt-cloud/api-v2#operation/triggerRun

    When the resource was built with ``disable_schedule_on_trigger=True``
    (the default), the job's dbt Cloud schedule is switched off first so the
    job only runs when triggered from Dagster.

    Args:
        job_id (int): ID of the dbt Cloud job - the final number in the job's
            URL in the dbt Cloud UI.
        kwargs: Properties to override for this run.

    Returns:
        Dict[str, Any]: Parsed ``data`` payload describing the new run.
    """
    if self._disable_schedule_on_trigger:
        self._log.info("Disabling dbt Cloud job schedule.")
        self.update_job(job_id, triggers={"schedule": False})
    self._log.info(f"Initializing run for job with job_id={job_id}")
    # The API requires a "cause"; supply a default when the caller gave none.
    kwargs.setdefault("cause", "Triggered via Dagster")
    resp = self.make_request("POST", f"{self._account_id}/jobs/{job_id}/run/", data=kwargs)
    self._log.info(
        f"Run initialized with run_id={resp['id']}. View this run in "
        f"the dbt Cloud UI: {resp['href']}"
    )
    return resp
def get_run(self, run_id: int, include_related: List[str] = None) -> Dict[str, Any]:
    """Fetch the details of a single dbt Cloud run.

    Args:
        run_id (int): ID of the dbt Cloud run - the final number in the run's
            URL in the dbt Cloud UI.
        include_related (List[str]): Related fields to pull with the run;
            valid values are "trigger", "job", and "debug_logs".

    Returns:
        Dict[str, Any]: Parsed run details; schema at
        https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById
    """
    if include_related:
        suffix = "?include_related=" + ",".join(include_related)
    else:
        suffix = ""
    return self.make_request("GET", f"{self._account_id}/runs/{run_id}/{suffix}")
def get_run_steps(self, run_id: int) -> List[str]:
    """Return the commands executed by a dbt Cloud run, one per step.

    Trigger-level step overrides take precedence over the steps configured
    on the job itself.

    Args:
        run_id (int): ID of the dbt Cloud run - the final number in the run's
            URL in the dbt Cloud UI.

    Returns:
        List[str]: Command for each step of the run.
    """
    run_details = self.get_run(run_id, include_related=["trigger", "job"])
    configured_steps = run_details["job"]["execute_steps"]
    overridden_steps = run_details["trigger"]["steps_override"]
    if overridden_steps:
        return overridden_steps
    return configured_steps
def cancel_run(self, run_id: int) -> Dict[str, Any]:
    """Cancel an in-progress dbt Cloud run.

    Args:
        run_id (int): ID of the dbt Cloud run - the final number in the run's
            URL in the dbt Cloud UI.

    Returns:
        Dict[str, Any]: Parsed run details; schema at
        https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById
    """
    self._log.info(f"Cancelling run with id '{run_id}'")
    endpoint = f"{self._account_id}/runs/{run_id}/cancel/"
    return self.make_request("POST", endpoint)
def list_run_artifacts(self, run_id: int, step: int = None) -> List[str]:
    """List the artifact paths produced by a completed dbt Cloud run.

    Args:
        run_id (int): ID of the dbt Cloud run - the final number in the run's
            URL in the dbt Cloud UI.
        step (int): 1-based index of the run step to query; when omitted, the
            artifacts compiled for the last step are listed.

    Returns:
        List[str]: Paths of the available run artifacts.
    """
    suffix = f"?step={step}" if step else ""
    payload = {"step": step} if step else None
    response = self.make_request(
        "GET",
        f"{self._account_id}/runs/{run_id}/artifacts/{suffix}",
        data=payload,
    )
    return cast(list, response)
def get_run_artifact(self, run_id: int, path: str, step: int = None) -> str:
    """
    The string contents of a run artifact from a dbt Cloud run.
    Args:
        run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
            the details page of your run in the dbt Cloud UI. It will be the final number in the
            url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``
        path (str): The path to this run artifact (e.g. 'run/my_new_project/models/example/my_first_dbt_model.sql')
        step (int): The index of the step in the run to query for artifacts. The first step in
            the run has the index 1. If the step parameter is omitted, then this endpoint will
            return the artifacts compiled for the last step in the run.
    Returns:
        str: The raw text contents of the requested run artifact.
    """
    query_params = f"?step={step}" if step else ""
    # return_text=True makes make_request() hand back {"text": response.text}
    # unparsed; unwrap it so callers receive the artifact as a plain string.
    return self.make_request(
        "GET",
        f"{self._account_id}/runs/{run_id}/artifacts/{path}{query_params}",
        data={"step": step} if step else None,
        return_text=True,
    )["text"]
def get_manifest(self, run_id: int, step: int = None) -> Dict[str, Any]:
"""
The parsed contents of a manifest.json file created | |
<gh_stars>1-10
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: base_ptycho
:platform: Unix
:synopsis: A base class for all ptychographic analysis methods
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from savu.plugins.plugin import Plugin
from savu.plugins.driver.cpu_plugin import CpuPlugin
import logging
import numpy as np
class BasePtycho(Plugin, CpuPlugin): # also make one for gpu
"""
A base plugin for doing ptychography. Other ptychography plugins should \
inherit from this.
:param in_datasets: A list of the dataset(s) to process. Default: [].
:param out_datasets: A list of the dataset(s) to \
process. Default: ['probe', 'object_transmission', 'positions'].
"""
def __init__(self, name):
    # Delegate to the Plugin/CpuPlugin constructor chain; `name` is the
    # plugin's registration name passed through to the framework.
    super(BasePtycho, self).__init__(name)
def setup(self):
    """Create and configure the three output datasets (probe,
    object_transmission, positions) from the 4D-scan input dataset, then
    register the plugin-data patterns for each.
    """
    self.exp.log(self.name + " Setting up the ptycho")
    in_dataset, out_dataset = self.get_datasets()
    in_meta_data = in_dataset[0].meta_data  # grab the positions from the metadata
    logging.debug('getting the positions...')
    self.positions = in_meta_data.get('xy')  # get the positions and bind them
    # lets set up the axis labels for output datasets
    position_labels, probe_labels, object_labels, self.sh = self.setup_axis_labels(in_dataset)
    # Now create the datasets and work out the patterns
    ### PROBE ###
    probe = out_dataset[0]
    # probe shape is derived from the trailing two (detector) dims of the input
    self.set_size_probe(in_dataset[0].get_shape()[-2:])
    logging.debug("##### PROBE #####")
    probe.create_dataset(axis_labels=probe_labels,
                         shape=self.get_size_probe())  # create the dataset
    self.probe_pattern_setup(probe_labels, probe)
    ### OBJECT ####
    # NOTE: the dataset/positions arguments are currently ignored by
    # set_size_object, which re-reads self.get_positions() itself.
    self.set_size_object(in_dataset[0], self.get_positions(),
                         self.get_pixel_size())
    object_trans = out_dataset[1]
    object_shape = self.sh + self.get_size_object()
    logging.debug("##### OBJECT #####")
    object_trans.create_dataset(axis_labels=object_labels,
                                shape=object_shape)  # create the dataset
    self.object_pattern_setup(object_labels, object_trans)
    ### POSITIONS ###
    logging.debug('##### POSITIONS #####')
    positions = out_dataset[2]
    positions_shape = self.sh + self.get_positions().shape[-2:]
    logging.debug('positions shape is:%s', str(positions_shape))
    positions.create_dataset(axis_labels=position_labels,
                             shape=positions_shape)
    rest_pos = range(len(position_labels))
    # dim 0 slices; everything else is core for the CHANNEL pattern
    pos_md = \
        {'core_dims': tuple(set(rest_pos) - set([0])), 'slice_dims': (0,)}
    positions.add_pattern("CHANNEL", **pos_md)
    '''
    now we need to tell the setup what we want as input shapes, output shapes, and the number of each of them in one go.
    '''
    in_pData, out_pData = self.get_plugin_datasets()
    in_pData[0].plugin_data_setup(self.get_plugin_pattern(), self.get_max_frames())
    out_pData[0].plugin_data_setup("PROJECTION", self.get_num_probe_modes())
    out_pData[1].plugin_data_setup("PROJECTION", self.get_num_object_modes())
    out_pData[2].plugin_data_setup("CHANNEL", self.get_max_frames())
    self.exp.log(self.name + " End")
'''
The below methods influence the set-up and can be over-ridden depending on which software package we are using
'''
def get_plugin_pattern(self):
    """Name of the input data pattern; a ptycho scan is treated as a 4D scan."""
    return "4D_SCAN"
def nInput_datasets(self):
    # Framework hook: this plugin consumes a single input dataset.
    return 1

def nOutput_datasets(self):
    # Framework hook: three outputs (probe, object_transmission, positions).
    return 3

def get_num_probe_modes(self):
    # Number of probe (illumination) modes; override for multi-mode ptycho.
    return 1

def get_num_object_modes(self):
    # Number of object modes; override for multi-mode ptycho.
    return 1

def get_positions(self):
    # Scan positions bound in setup() from the input metadata 'xy' entry.
    return self.positions

def get_pixel_size(self):
    # Pixel size handed to set_size_object() by setup().
    # NOTE(review): units presumably metres (30 nm) - confirm.
    return 30e-9
def set_size_object(self, dataset, positions, pobj=33e-9):
    """Compute and store the object array shape in ``self.obj_shape``.

    The field of view is taken from the min/max of the scan positions,
    divided by the object pixel size ``pobj`` and padded by the probe size.
    Despite the original docstring, this method returns None and stores a
    (xsize, ysize, n_object_modes) tuple on the instance; retrieve it with
    get_size_object().

    NOTE(review): the ``dataset`` and ``positions`` parameters are unused -
    the positions are re-read via self.get_positions().
    """
    x, y = self.get_positions()[0], self.get_positions()[1]
    probe_size = self.get_size_probe()
    x_fov = np.max(x) - np.min(x)
    y_fov = np.max(y) - np.min(y)
    xsize = int(x_fov // pobj) + probe_size[0]
    ysize = int(y_fov // pobj) + probe_size[1]
    self.obj_shape = xsize, ysize, self.get_num_object_modes()
def get_size_object(self):
    # Shape tuple computed by set_size_object(): (x, y, n_object_modes).
    return self.obj_shape

def set_size_probe(self, val):
    # Prepend a singleton axis and append the mode axis around the trailing
    # detector dimensions passed in: (1,) + val + (n_probe_modes,).
    self.probe_size = (1,) + val + (self.get_num_probe_modes(),)

def get_size_probe(self):
    """Return the probe shape tuple stored by set_size_probe()."""
    return self.probe_size

def get_max_frames(self):
    # Frames processed per plugin call; 'single' is the framework keyword.
    return 'single'

def get_output_axis_units(self):
    # Units appended to the output x/y axis labels in setup_axis_labels().
    return 'nm'
def probe_pattern_setup(self, probe_labels, probe):
    '''
    Register the data-access patterns for the probe dataset: PROJECTION
    always, TIMESERIES when a rotation axis exists, and SPECTRUM (plus
    TIMESERIES) when an energy axis exists.
    '''
    probe_dims = len(probe_labels)  # the number of dimensions from the axis labels
    rest_probe = range(probe_dims)  # all the dimension indices we have
    self.set_projection_pattern(probe, rest_probe)
    self.set_probe_rotation_patterns(probe, rest_probe)
    self.set_probe_energy_patterns(probe, rest_probe)
def object_pattern_setup(self, object_labels, object_trans):
    '''
    Register the data-access patterns for the object_transmission dataset:
    PROJECTION always, a rotation-dependent pattern when a rotation axis
    exists, and SPECTRUM when an energy axis exists.
    '''
    obj_dims = len(object_labels)  # the number of dimensions from the axis labels
    rest_obj = range(obj_dims)  # all the dimension indices we have
    self.set_projection_pattern(object_trans, rest_obj)
    self.set_object_rotation_patterns(object_trans, rest_obj)
    self.set_object_energy_patterns(object_trans, rest_obj)
def setup_axis_labels(self, in_dataset):
    '''
    Build the axis labels for the three output datasets.

    The 4D-scan input carries 'xy', 'detectorX', 'detectorY' labels plus
    possibly extra scan axes (energy, rotation, ...). The pattern labels are
    stripped and replaced so that:
        probe/object: <scan axes...>, 'mode_idx', 'x', 'y'
        positions:    <scan axes...>, 'xy', 'idx'
    Also returns the shape of the retained (scan) axes.
    '''
    PATTERN_LABELS = ['xy', 'detectorX', 'detectorY']
    in_labels = in_dataset[0].data_info.get('axis_labels')  # this is a list of dictionarys
    # NOTE: dict.keys()[0] only works on Python 2 (keys() returns a list there).
    existing_labels = [d.keys()[0] for d in in_labels]  # this just gets the axes names
    logging.debug('The existing labels are:%s, we will remove:%s' % (existing_labels, PATTERN_LABELS))
    logging.debug('removing these labels from the list')
    core_labels_raw = [l for l in existing_labels if l not in PATTERN_LABELS]  # removes them from the list
    # NOTE(review): the units are always looked up in in_labels[0]; presumably
    # each axis has its own dict, in which case the dict for label l should be
    # indexed here instead - confirm.
    core_labels = [l + '.' + in_labels[0][l] for l in core_labels_raw]  # add the units in for the ones we are keeping
    # now we just have to add the new ones to this.
    trans_units = self.get_output_axis_units()
    probe_labels = list(core_labels)  # take a copy
    probe_labels.extend(['mode_idx.number','x.' + trans_units, 'y.' + trans_units, ])
    logging.debug('the labels for the probe are:%s' % str(probe_labels))
    object_labels = list(core_labels)
    object_labels.extend(['mode_idx.number','x.' + trans_units, 'y.' + trans_units])
    logging.debug('the labels for the object are:%s' % str(object_labels))
    position_labels = list(core_labels)
    position_labels.extend(['xy.m','idx'])
    logging.debug('the labels for the positions are:%s' % str(position_labels))
    # now we also need this part of the shape of the data so...
    md = in_dataset[0].meta_data
    sh = tuple([len(md.get(l)) for l in core_labels_raw])
    return position_labels, probe_labels, object_labels, sh
def set_probe_rotation_patterns(self, probe, rest_probe):
    """Add a TIMESERIES pattern over the rotation axis, if the data has one.

    A missing 'rotation_angle' axis simply means this is not a tomography
    scan, so the lookup failure is logged and deliberately swallowed.
    """
    try:
        rot_axis = probe.get_data_dimension_by_axis_label('rotation_angle', contains=True)  # get the rotation axis
    except Exception as e:
        logging.warn(str(e) + 'we were looking for "rotation_angle"')
        logging.debug('This is not a tomography, so no time series for the probe')
    else:
        # rotation axis is core; every other dim is sliced over
        probe_ts = {'core_dims': (rot_axis,),
                    'slice_dims': tuple(set(rest_probe) - set([rot_axis]))}
        probe.add_pattern("TIMESERIES", **probe_ts)  # so we can FT the wiggles etc...
def set_probe_energy_patterns(self, probe, rest_probe):
    """Add SPECTRUM and TIMESERIES patterns over the energy axis, if present.

    A missing 'energy' axis simply means this is not spectro-microscopy, so
    the lookup failure is logged and deliberately swallowed.
    """
    try:
        energy_axis = probe.get_data_dimension_by_axis_label('energy', contains=True)  # get an energy axis
    except Exception as e:
        logging.warn(str(e) + 'we were looking for "energy"')
        logging.debug('This is not spectro-microscopy, so no spectrum/timeseries for the probe')
    else:
        # BUG FIX: energy_axis is a single dimension index (an int - see the
        # set([energy_axis]) below and the rotation variant's (rot_axis,)),
        # so tuple(energy_axis) raised TypeError: 'int' object is not
        # iterable. Wrap it in a 1-tuple instead.
        probe_spec = {'core_dims': (energy_axis,),
                      'slice_dims': tuple(set(rest_probe) - set([energy_axis]))}
        probe.add_pattern("SPECTRUM", **probe_spec)
        probe.add_pattern("TIMESERIES", **probe_spec)
        logging.debug('This is probably spectro-microscopy so I have added a SPECTRUM pattern to the probe')
        logging.debug('I have also added a TIMESERIES pattern on the same axis, but be careful with what this means!')
def set_projection_pattern(self, probe, rest_probe):
    """Add the PROJECTION pattern with two core dims at positions (-3, -2)."""
    # NOTE(review): setup_axis_labels appends labels in the order
    # ('mode_idx', 'x', 'y'), which would make (-3, -2) = (mode_idx, x),
    # while set_size_probe builds the shape ending (..., det, det, modes),
    # for which (-3, -2) are the detector dims. The two orderings disagree -
    # confirm which is authoritative before relying on these core dims.
    probe_proj_core = tuple([rest_probe[idx] for idx in (-3, -2)])  # hard coded since we set them just above
    probe_slice = tuple(set(rest_probe) - set(probe_proj_core))
    probe_proj = {'core_dims': probe_proj_core, 'slice_dims': probe_slice}
    probe.add_pattern("PROJECTION", **probe_proj)
    logging.debug('have added a PROJECTION pattern')
def set_object_energy_patterns(self, object_trans, rest_obj):
    """Add a SPECTRUM pattern over the energy axis, if present.

    A missing 'energy' axis simply means this is not spectro-microscopy, so
    the lookup failure is logged and deliberately swallowed.
    """
    try:
        energy_axis = object_trans.get_data_dimension_by_axis_label('energy', contains=True)  # get an energy axis
    except Exception as e:
        logging.warn(str(e) + 'we were looking for "energy"')
        logging.debug('This is not spectro-microscopy, so no spectrum for the object')
    else:
        # BUG FIX: energy_axis is a single dimension index (an int - see the
        # set([energy_axis]) below and set_object_rotation_patterns), so
        # tuple(energy_axis) raised TypeError: 'int' object is not iterable.
        # Wrap it in a 1-tuple instead.
        obj_spec = {'core_dims': (energy_axis,),
                    'slice_dims': tuple(set(rest_obj) - set([energy_axis]))}
        object_trans.add_pattern("SPECTRUM", **obj_spec)
        logging.debug('This is probably spectro-microscopy so I have added a SPECTRUM pattern to the object')
def set_object_rotation_patterns(self, object_trans, rest_obj):
try:
rot_axis = object_trans.get_data_dimension_by_axis_label('rotation_angle', contains=True) # get the rotation axis
except Exception as e:
logging.warn(str(e) + 'we were looking for "rotation_angle"')
logging.debug('This is not a tomography, so no sinograms for the object transmission')
else:
| |
<reponame>l2ol33rt/salt
# -*- coding: utf-8 -*-
'''
Manage JBoss 7 Application Server via CLI interface
.. versionadded:: 2015.5.0
This state uses the jboss-cli.sh script from a JBoss or Wildfly installation and parses its output to determine the execution result.
In order to run each state, a jboss_config dictionary with the following properties must be passed:
.. code-block:: yaml
jboss:
cli_path: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
controller: 10.11.12.13:9999
cli_user: 'jbossadm'
cli_password: '<PASSWORD>'
If the controller doesn't require a password, then the cli_user and cli_password parameters are optional.
Since same dictionary with configuration will be used in all the states, it may be more convenient to move JBoss configuration and other properties
to the pillar.
Example of application deployment from local filesystem:
.. code-block:: yaml
application_deployed:
jboss7.deployed:
- salt_source:
target_file: '/tmp/webapp.war'
- jboss_config: {{ pillar['jboss'] }}
For the sake of brevity, examples for each state assume that jboss_config is contained in the pillar.
'''
# Import python libs
from __future__ import absolute_import
import time
import logging
import re
import traceback
# Import Salt libs
from salt.utils import dictdiffer
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
def datasource_exists(name, jboss_config, datasource_properties, recreate=False, profile=None):
    '''
    Ensures that a datasource with given properties exist on the jboss instance.
    If datasource doesn't exist, it is created, otherwise only the properties that are
    different will be updated.

    name
        Datasource property name
    jboss_config
        Dict with connection properties (see state description)
    datasource_properties
        Dict with datasource properties
    recreate : False
        If set to True and datasource exists it will be removed and created again. However,
        if there are deployments that depend on the datasource, it will not be possible to
        remove it.
    profile : None
        The profile name for this datasource (domain mode only)

    Example:

    .. code-block:: yaml

        sampleDS:
          jboss7.datasource_exists:
            - recreate: False
            - datasource_properties:
                driver-name: mysql
                connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase'
                jndi-name: 'java:jboss/datasources/sampleDS'
                user-name: sampleuser
                password: <PASSWORD>
                min-pool-size: 3
                use-java-context: True
            - jboss_config: {{ pillar['jboss'] }}
            - profile: full-ha
    '''
    log.debug(" ======================== STATE: jboss7.datasource_exists (name: %s) ", name)
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    has_changed = False
    ds_current_properties = {}
    ds_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile)
    if ds_result['success']:
        ds_current_properties = ds_result['result']
        if recreate:
            # Drop-and-recreate path: any failure aborts with result=False.
            remove_result = __salt__['jboss7.remove_datasource'](jboss_config=jboss_config, name=name, profile=profile)
            if remove_result['success']:
                ret['changes']['removed'] = name
            else:
                ret['result'] = False
                ret['comment'] = 'Could not remove datasource. Stdout: '+remove_result['stdout']
                return ret
            has_changed = True  # if we are here, we have already made a change
            create_result = __salt__['jboss7.create_datasource'](jboss_config=jboss_config, name=name, datasource_properties=datasource_properties, profile=profile)
            if create_result['success']:
                ret['changes']['created'] = name
            else:
                ret['result'] = False
                ret['comment'] = 'Could not create datasource. Stdout: '+create_result['stdout']
                return ret
            # Re-read so the diff below compares the freshly created state.
            read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile)
            if read_result['success']:
                ds_new_properties = read_result['result']
            else:
                ret['result'] = False
                ret['comment'] = 'Could not read datasource. Stdout: '+read_result['stdout']
                return ret
        else:
            # In-place update path: only differing properties are changed.
            update_result = __salt__['jboss7.update_datasource'](jboss_config=jboss_config, name=name, new_properties=datasource_properties, profile=profile)
            if not update_result['success']:
                ret['result'] = False
                ret['comment'] = 'Could not update datasource. '+update_result['comment']
                # some changes to the datasource may have already been made, therefore we don't quit here
            else:
                ret['comment'] = 'Datasource updated.'
            read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile)
            ds_new_properties = read_result['result']
    else:
        if ds_result['err_code'] == 'JBAS014807':  # ok, resource not exists:
            create_result = __salt__['jboss7.create_datasource'](jboss_config=jboss_config, name=name, datasource_properties=datasource_properties, profile=profile)
            if create_result['success']:
                read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile)
                ds_new_properties = read_result['result']
                ret['comment'] = 'Datasource created.'
            else:
                ret['result'] = False
                ret['comment'] = 'Could not create datasource. Stdout: '+create_result['stdout']
        else:
            raise CommandExecutionError('Unable to handle error: {0}'.format(ds_result['failure-description']))
    if ret['result']:
        # Report what actually changed by diffing new vs. previous properties.
        log.debug("ds_new_properties=%s", str(ds_new_properties))
        log.debug("ds_current_properties=%s", str(ds_current_properties))
        diff = dictdiffer.diff(ds_new_properties, ds_current_properties)
        added = diff.added()
        if len(added) > 0:
            has_changed = True
            ret['changes']['added'] = __format_ds_changes(added, ds_current_properties, ds_new_properties)
        removed = diff.removed()
        if len(removed) > 0:
            has_changed = True
            ret['changes']['removed'] = __format_ds_changes(removed, ds_current_properties, ds_new_properties)
        changed = diff.changed()
        if len(changed) > 0:
            has_changed = True
            ret['changes']['changed'] = __format_ds_changes(changed, ds_current_properties, ds_new_properties)
        if not has_changed:
            ret['comment'] = 'Datasource not changed.'
    return ret
def __format_ds_changes(keys, old_dict, new_dict):
    """Render a newline-terminated, human-readable summary for each changed
    datasource property key, showing old->new where both sides exist."""
    log.debug("__format_ds_changes(keys=%s, old_dict=%s, new_dict=%s)", str(keys), str(old_dict), str(new_dict))
    lines = []
    for key in keys:
        log.debug("key=%s", str(key))
        in_old = key in old_dict
        in_new = key in new_dict
        if in_old and in_new:
            lines.append(key + ':' + __get_ds_value(old_dict, key) + '->' + __get_ds_value(new_dict, key) + '\n')
        elif in_old:
            lines.append(key + '\n')
        elif in_new:
            lines.append(key + ':' + __get_ds_value(new_dict, key) + '\n')
    return ''.join(lines)
def __get_ds_value(dct, key):
    """Return a printable value for datasource property `key`, masking
    passwords and spelling out None as 'undefined'."""
    log.debug("__get_value(dict,%s)", key)
    if key == "password":
        return "***"
    value = dct[key]
    return 'undefined' if value is None else str(value)
def bindings_exist(name, jboss_config, bindings, profile=None):
    '''
    Ensures that given JNDI binding are present on the server.
    If a binding doesn't exist on the server it will be created.
    If it already exists its value will be changed.
    jboss_config:
        Dict with connection properties (see state description)
    bindings:
        Dict with bindings to set.
    profile:
        The profile name (domain mode only)
    Example:
    .. code-block:: yaml
        jndi_entries_created:
          jboss7.bindings_exist:
           - bindings:
              'java:global/sampleapp/environment': 'DEV'
              'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties'
           - jboss_config: {{ pillar['jboss'] }}
    '''
    log.debug(" ======================== STATE: jboss7.bindings_exist (name: %s) (profile: %s) ", name, profile)
    log.debug('bindings='+str(bindings))
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': 'Bindings not changed.'}
    changed = False
    for binding_name in bindings:
        desired_value = str(bindings[binding_name])
        read_result = __salt__['jboss7.read_simple_binding'](binding_name=binding_name, jboss_config=jboss_config, profile=profile)
        if not read_result['success']:
            # JBAS014807 means the resource does not exist yet -> create it;
            # any other failure is unexpected and aborts the state.
            if read_result['err_code'] != 'JBAS014807':
                raise CommandExecutionError(read_result['failure-description'])
            create_result = __salt__['jboss7.create_simple_binding'](binding_name=binding_name, value=desired_value, jboss_config=jboss_config, profile=profile)
            if not create_result['success']:
                raise CommandExecutionError(create_result['failure-description'])
            changed = True
            __log_binding_change(ret['changes'], 'added', binding_name, desired_value)
        else:
            current_value = read_result['result']['value']
            if current_value != desired_value:
                update_result = __salt__['jboss7.update_simple_binding'](binding_name=binding_name, value=desired_value, jboss_config=jboss_config, profile=profile)
                if not update_result['success']:
                    raise CommandExecutionError(update_result['failure-description'])
                changed = True
                __log_binding_change(ret['changes'], 'changed', binding_name, desired_value, current_value)
    if changed:
        ret['comment'] = 'Bindings changed.'
    return ret
def __log_binding_change(changes, type_, key, new, old=None):
    '''
    Append a one-line description of a binding change to changes[type_],
    creating the entry if needed. With old given the line reads
    "key:old->new", otherwise "key:new".
    '''
    entry = (key + ':' + new + '\n') if old is None else (key + ':' + old + '->' + new + '\n')
    changes[type_] = changes.get(type_, '') + entry
def deployed(name, jboss_config, salt_source=None):
    '''Ensures that the given application is deployed on server.
    jboss_config:
        Dict with connection properties (see state description)
    salt_source:
        How to find the artifact to be deployed.
            target_file:
                Where to look in the minion's file system for the artifact to be deployed (e.g. '/tmp/application-web-0.39.war'). When source is specified, also specifies where to save the retrieved file.
            source:
                (optional) File on salt master (e.g. salt://application-web-0.39.war). If absent, no files will be retrieved and the artifact in target_file will be used for the deployment.
            undeploy:
                (optional) Regular expression to match against existing deployments. When present, if there is a deployment that matches the regular expression, it will be undeployed before the new artifact is deployed.
    Examples:
    Deployment of a file from minion's local file system:
    .. code-block:: yaml
        application_deployed:
          jboss7.deployed:
            - salt_source:
                target_file: '/tmp/webapp.war'
            - jboss_config: {{ pillar['jboss'] }}
    It is assumed that /tmp/webapp.war was made available by some
    other means. No applications will be undeployed; if an existing
    deployment that shares that name exists, then it will be replaced
    with the updated version.
    Deployment of a file from the Salt master's file system:
    .. code-block:: yaml
        application_deployed:
          jboss7.deployed:
           - salt_source:
                source: salt://application-web-0.39.war
                target_file: '/tmp/application-web-0.39.war'
                undeploy: 'application-web-.*'
           - jboss_config: {{ pillar['jboss'] }}
    Here, application-web-0.39.war file is downloaded from Salt file system to /tmp/application-web-0.39.war file on minion.
    Existing deployments are checked if any of them matches 'application-web-.*' regular expression, and if so then it
    is undeployed before deploying the application. This is useful to automate deployment of new application versions.
    If the source parameter of salt_source is specified, it can use
    any protocol that the file states use. This includes not only
    downloading from the master but also HTTP, HTTPS, FTP,
    Amazon S3, and OpenStack Swift.
    '''
    log.debug(" ======================== STATE: jboss7.deployed (name: %s) ", name)
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    comment = ''
    # Validate the connection settings and salt_source structure up front;
    # on failure the state returns an error without touching the server.
    validate_success, validate_comment = __validate_arguments(jboss_config, salt_source)
    if not validate_success:
        return _error(ret, validate_comment)
    # Resolve/retrieve the artifact on the minion (may fetch from the master
    # when salt_source['source'] is set).
    resolved_source, get_artifact_comment = __get_artifact(salt_source)
    log.debug('resolved_source=%s', resolved_source)
    log.debug('get_artifact_comment=%s', get_artifact_comment)
    comment = __append_comment(new_comment=get_artifact_comment, current_comment=comment)
    if resolved_source is None:
        return _error(ret, get_artifact_comment)
    # Look for an existing deployment to replace (driven by the optional
    # salt_source['undeploy'] pattern — see docstring).
    find_success, deployment, find_comment = __find_deployment(jboss_config, salt_source)
    if not find_success:
        return _error(ret, find_comment)
    log.debug('deployment=%s', deployment)
    if deployment is not None:
        # Undeploy the matched application before deploying the new artifact.
        __salt__['jboss7.undeploy'](jboss_config, deployment)
        ret['changes']['undeployed'] = deployment
    deploy_result = __salt__['jboss7.deploy'](jboss_config=jboss_config, source_file=resolved_source)
    log.debug('deploy_result=%s', str(deploy_result))
    if deploy_result['success']:
        comment = __append_comment(new_comment='Deployment completed.', current_comment=comment)
        ret['comment'] = comment
        ret['changes']['deployed'] = resolved_source
    else:
        comment = __append_comment(new_comment='''Deployment failed\nreturn code={retcode}\nstdout='{stdout}'\nstderr='{stderr}'''.format(**deploy_result), current_comment=comment)
        return _error(ret, comment)
    return ret
def __validate_arguments(jboss_config, salt_source):
result, comment = __check_dict_contains(jboss_config, 'jboss_config', ['cli_path', 'controller'])
if salt_source is None:
result = False
comment = __append_comment('No salt_source | |
usersDB)
saveDB(bbConfig.bountyDBPath, bountiesDB)
saveDB(bbConfig.guildDBPath, guildsDB)
print(datetime.now().strftime("%H:%M:%S: Data saved manually!"))
await message.channel.send("saved!")
bbCommands.register("save", dev_cmd_save, isDev=True)
dmCommands.register("save", dev_cmd_save, isDev=True)
"""
developer command printing whether or not the current guild has an announcements channel set
@param message -- the discord message calling the command
@param args -- ignored
"""
async def dev_cmd_has_announce(message, args):
guild = guildsDB.getGuild(message.guild.id)
await message.channel.send(":x: Unknown guild!" if guild is None else guild.hasAnnounceChannel())
bbCommands.register("has-announce", dev_cmd_has_announce, isDev=True)
dmCommands.register("has-announce", err_nodm, isDev=True)
"""
developer command printing the current guild's announcements channel if one is set
@param message -- the discord message calling the command
@param args -- ignored
"""
async def dev_cmd_get_announce(message, args):
await message.channel.send("<#" + str(guildsDB.getGuild(message.guild.id).getAnnounceChannelId()) + ">")
bbCommands.register("get-announce", dev_cmd_get_announce, isDev=True)
dmCommands.register("get-announce", err_nodm, isDev=True)
"""
developer command printing whether or not the current guild has a play channel set
@param message -- the discord message calling the command
@param args -- ignored
"""
async def dev_cmd_has_play(message, args):
guild = guildsDB.getGuild(message.guild.id)
await message.channel.send(":x: Unknown guild!" if guild is None else guild.hasPlayChannel())
bbCommands.register("has-play", dev_cmd_has_play, isDev=True)
dmCommands.register("has-play", err_nodm, isDev=True)
"""
developer command printing the current guild's play channel if one is set
@param message -- the discord message calling the command
@param args -- ignored
"""
async def dev_cmd_get_play(message, args):
await message.channel.send("<#" + str(guildsDB.getGuild(message.guild.id).getPlayChannelId()) + ">")
bbCommands.register("get-play", dev_cmd_get_play, isDev=True)
dmCommands.register("get-play", err_nodm, isDev=True)
"""
developer command clearing all active bounties
@param message -- the discord message calling the command
@param args -- ignored
"""
async def dev_cmd_clear_bounties(message, args):
bountiesDB.clearBounties()
await message.channel.send(":ballot_box_with_check: Active bounties cleared!")
bbCommands.register("clear-bounties", dev_cmd_clear_bounties, isDev=True)
dmCommands.register("clear-bounties", dev_cmd_clear_bounties, isDev=True)
"""
developer command printing the calling user's checking cooldown
@param message -- the discord message calling the command
@param args -- ignore
"""
async def dev_cmd_get_cooldown(message, args):
diff = datetime.utcfromtimestamp(usersDB.getUser(message.author.id).bountyCooldownEnd) - datetime.utcnow()
minutes = int(diff.total_seconds() / 60)
seconds = int(diff.total_seconds() % 60)
await message.channel.send(str(usersDB.getUser(message.author.id).bountyCooldownEnd) + " = " + str(minutes) + "m, " + str(seconds) + "s.")
await message.channel.send(datetime.utcfromtimestamp(usersDB.getUser(message.author.id).bountyCooldownEnd).strftime("%Hh%Mm%Ss"))
await message.channel.send(datetime.utcnow().strftime("%Hh%Mm%Ss"))
bbCommands.register("get-cool ", dev_cmd_get_cooldown, isDev=True)
dmCommands.register("get-cool ", dev_cmd_get_cooldown, isDev=True)
"""
developer command resetting the checking cooldown of the calling user, or the specified user if one is given
@param message -- the discord message calling the command
@param args -- string, can be empty or contain a user mention
"""
async def dev_cmd_reset_cooldown(message, args):
# reset the calling user's cooldown if no user is specified
if args == "":
usersDB.getUser(message.author.id).bountyCooldownEnd = datetime.utcnow().timestamp()
# otherwise get the specified user's discord object and reset their cooldown.
# [!] no validation is done.
else:
if "!" in args:
requestedUser = client.get_user(int(args[2:-1]))
else:
requestedUser = client.get_user(int(args[1:-1]))
usersDB.getUser(requestedUser).bountyCooldownEnd = datetime.utcnow().timestamp()
await message.channel.send("Done!")
bbCommands.register("reset-cool", dev_cmd_reset_cooldown, isDev=True)
dmCommands.register("reset-cool", dev_cmd_reset_cooldown, isDev=True)
"""
developer command setting the checking cooldown applied to users
this does not update bbConfig and will be reverted on bot restart
@param message -- the discord message calling the command
@param args -- string containing an integer number of minutes
"""
async def dev_cmd_setcheckcooldown(message, args):
# verify a time was requested
if args == "":
await message.channel.send(":x: please give the number of minutes!")
return
# verify the requested time is an integer
if not bbUtil.isInt(args):
await message.channel.send(":x: that's not a number!")
return
# update the checking cooldown amount
bbConfig.checkCooldown["minutes"] = int(args)
await message.channel.send("Done! *you still need to update the file though* " + message.author.mention)
bbCommands.register("setcheckcooldown", dev_cmd_setcheckcooldown, isDev=True)
dmCommands.register("setcheckcooldown", dev_cmd_setcheckcooldown, isDev=True)
"""
developer command setting the number of minutes in the new bounty generation period
this does not update bbConfig and will be reverted on bot restart
this does not affect the numebr of hours in the new bounty generation period
@param message -- the discord message calling the command
@param args -- string containing an integer number of minutes
"""
async def dev_cmd_setbountyperiodm(message, args):
# verify a time was given
if args == "":
await message.channel.send(":x: please give the number of minutes!")
return
# verify the given time is an integer
if not bbUtil.isInt(args):
await message.channel.send(":x: that's not a number!")
return
# update the new bounty generation cooldown
bbConfig.newBountyFixedDelta["minutes"] = int(args)
bbConfig.newBountyFixedDeltaChanged = True
await message.channel.send("Done! *you still need to update the file though* " + message.author.mention)
bbCommands.register("setbountyperiodm", dev_cmd_setbountyperiodm, isDev=True)
dmCommands.register("setbountyperiodm", dev_cmd_setbountyperiodm, isDev=True)
"""
developer command setting the number of hours in the new bounty generation period
this does not update bbConfig and will be reverted on bot restart
this does not affect the numebr of minutes in the new bounty generation period
@param message -- the discord message calling the command
@param args -- string containing an integer number of hours
"""
async def dev_cmd_setbountyperiodh(message, args):
# verify a time was specified
if args == "":
await message.channel.send(":x: please give the number of minutes!")
return
# verify the given time is an integer
if not bbUtil.isInt(args):
await message.channel.send(":x: that's not a number!")
return
# update the bounty generation period
bbConfig.newBountyFixedDeltaChanged = True
bbConfig.newBountyFixedDelta["hours"] = int(args)
await message.channel.send("Done! *you still need to update the file though* " + message.author.mention)
bbCommands.register("setbountyperiodh", dev_cmd_setbountyperiodh, isDev=True)
dmCommands.register("setbountyperiodh", dev_cmd_setbountyperiodh, isDev=True)
"""
developer command resetting the current bounty generation period,
instantly generating a new bounty
@param message -- the discord message calling the command
@param args -- ignored
"""
async def dev_cmd_resetnewbountycool(message, args):
bbConfig.newBountyDelayReset = True
await message.channel.send(":ballot_box_with_check: New bounty cooldown reset!")
bbCommands.register("resetnewbountycool", dev_cmd_resetnewbountycool, isDev=True)
dmCommands.register("resetnewbountycool", dev_cmd_resetnewbountycool, isDev=True)
"""
developer command printing whether or not the given faction can accept new bounties
@param message -- the discord message calling the command
@param args -- string containing a faction
"""
async def dev_cmd_canmakebounty(message, args):
newFaction = args.lower()
# ensure the given faction exists
if not bountiesDB.factionExists(newFaction):
await message.channel.send("not a faction: '" + newFaction + "'")
else:
await message.channel.send(bountiesDB.factionCanMakeBounty(newFaction.lower()))
bbCommands.register("canmakebounty", dev_cmd_canmakebounty, isDev=True)
dmCommands.register("canmakebounty", dev_cmd_canmakebounty, isDev=True)
"""
developer command sending a message to the playChannel of all guilds that have one
@param message -- the discord message calling the command
@param args -- string containing the message to broadcast
"""
async def dev_cmd_broadcast(message, args):
if args == "":
await message.channel.send("provide a message!")
else:
useAnnounceChannel = False
broadcastEmbed = None
msg = args
if args.split(" ")[0].lower() == "announce-channel":
useAnnounceChannel = True
msg = args[17:]
try:
embedIndex = msg.index("embed=")
except ValueError:
embedIndex = -1
if embedIndex != -1:
msgText = msg[:embedIndex]
else:
msgText = msg
if embedIndex != -1:
msg = msg[embedIndex:]
titleTxt=""
desc=""
footerTxt=""
thumb=""
img=""
authorName=""
icon=""
try:
startIndex=msg.index("titleTxt='")+len("titleTxt=")+1
endIndex=startIndex + msg[msg.index("titleTxt='")+len("titleTxt='"):].index("'")
titleTxt=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("desc='")+len("desc=")+1
endIndex=startIndex + msg[msg.index("desc='")+len("desc='"):].index("'")
desc=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("footerTxt='")+len("footerTxt=")+1
endIndex=startIndex + msg[msg.index("footerTxt='")+len("footerTxt='"):].index("'")
footerTxt=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("thumb='")+len("thumb=")+1
endIndex=startIndex + msg[msg.index("thumb='")+len("thumb='"):].index("'")
thumb=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("img='")+len("img=")+1
endIndex=startIndex + msg[msg.index("img='")+len("img='"):].index("'")
img=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("authorName='")+len("authorName=")+1
endIndex=startIndex + msg[msg.index("authorName='")+len("authorName='"):].index("'")
authorName=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("icon='")+len("icon=")+1
endIndex=startIndex + msg[msg.index("icon='")+len("icon='"):].index("'")
icon=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
broadcastEmbed = makeEmbed(titleTxt=titleTxt, desc=desc, footerTxt=footerTxt, thumb=thumb, img=img, authorName=authorName, icon=icon)
try:
msg.index('\n')
fieldsExist = True
except ValueError:
fieldsExist = False
while fieldsExist:
nextNL = msg.index('\n')
try:
closingNL = nextNL + msg[nextNL+1:].index('\n')
except ValueError:
fieldsExist = False
if fieldsExist:
broadcastEmbed.add_field(name=msg[:nextNL].replace("{NL}", "\n"), value=msg[nextNL+1:closingNL+1].replace("{NL}", "\n"), inline=False)
msg = msg[closingNL+2:]
else:
broadcastEmbed.add_field(name=msg[:nextNL].replace("{NL}", "\n"), value=msg[nextNL+1:].replace("{NL}", "\n"), inline=False)
if useAnnounceChannel:
for guild in guildsDB.guilds.values():
if guild.hasAnnounceChannel():
await client.get_channel(guild.getAnnounceChannelId()).send(msgText, embed=broadcastEmbed)
else:
for guild in guildsDB.guilds.values():
if guild.hasPlayChannel():
await client.get_channel(guild.getPlayChannelId()).send(msgText, embed=broadcastEmbed)
bbCommands.register("broadcast", dev_cmd_broadcast, isDev=True, forceKeepArgsCasing=True)
dmCommands.register("broadcast", dev_cmd_broadcast, isDev=True, forceKeepArgsCasing=True)
"""
developer command sending a message to the same channel as the command is called in
@param message -- the discord message calling the command
@param args -- string containing the message to broadcast
"""
async def dev_cmd_say(message, args):
if args == "":
await message.channel.send("provide a message!")
else:
useAnnounceChannel = False
broadcastEmbed = None
msg = args
if args.split(" ")[0].lower() == "announce-channel":
useAnnounceChannel = True
msg = args[17:]
try:
embedIndex = msg.index("embed=")
except ValueError:
embedIndex = -1
if embedIndex != -1:
msgText = msg[:embedIndex]
else:
msgText = msg
if embedIndex != -1:
msg = msg[embedIndex:]
titleTxt=""
desc=""
footerTxt=""
thumb=""
img=""
authorName=""
icon=""
try:
startIndex=msg.index("titleTxt='")+len("titleTxt=")+1
endIndex=startIndex + msg[msg.index("titleTxt='")+len("titleTxt='"):].index("'")
titleTxt=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("desc='")+len("desc=")+1
endIndex=startIndex + msg[msg.index("desc='")+len("desc='"):].index("'")
desc=msg[startIndex:endIndex].replace("{NL}","\n")
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("footerTxt='")+len("footerTxt=")+1
endIndex=startIndex + msg[msg.index("footerTxt='")+len("footerTxt='"):].index("'")
footerTxt=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("thumb='")+len("thumb=")+1
endIndex=startIndex + msg[msg.index("thumb='")+len("thumb='"):].index("'")
thumb=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("img='")+len("img=")+1
endIndex=startIndex + msg[msg.index("img='")+len("img='"):].index("'")
img=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("authorName='")+len("authorName=")+1
endIndex=startIndex + msg[msg.index("authorName='")+len("authorName='"):].index("'")
authorName=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
try:
startIndex=msg.index("icon='")+len("icon=")+1
endIndex=startIndex + msg[msg.index("icon='")+len("icon='"):].index("'")
icon=msg[startIndex:endIndex]
msg=msg[endIndex+2:]
except ValueError:
pass
broadcastEmbed = makeEmbed(titleTxt=titleTxt, desc=desc, footerTxt=footerTxt, thumb=thumb, img=img, authorName=authorName, icon=icon)
try:
msg.index('\n')
fieldsExist = True
except ValueError:
fieldsExist = False
while fieldsExist:
nextNL = msg.index('\n')
try:
closingNL = nextNL + msg[nextNL+1:].index('\n')
except ValueError:
fieldsExist = False
if fieldsExist:
broadcastEmbed.add_field(name=msg[:nextNL].replace("{NL}", "\n"), value=msg[nextNL+1:closingNL+1].replace("{NL}", "\n"), inline=False)
msg = msg[closingNL+2:]
else:
broadcastEmbed.add_field(name=msg[:nextNL].replace("{NL}", "\n"), value=msg[nextNL+1:].replace("{NL}", "\n"), inline=False)
| |
# train_ic15.py
import sys
import torch
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import shutil
from torch.autograd import Variable
from torch.utils import data
import os
from dataset import IC15Loader
from metrics import runningScore
import models
from util import Logger, AverageMeter
import time
import util
from tensorboardX import SummaryWriter
def weighted_regression(gaussian_map, gaussian_gt, training_mask, border_map=None, text_thres=0.2, center_thres=0.7):
    """
    Weighted smooth-L1 regression loss for the gaussian score map.

    Args:
        gaussian_map: raw (pre-sigmoid) gaussian map from network outputs
        gaussian_gt: ground-truth gaussian map, values in [0, 1]
        training_mask: mask selecting pixels that contribute to the loss
        border_map: unused; kept for backward-compatible signature
        text_thres: gt threshold above which a pixel counts as text
        center_thres: gt threshold above which a pixel counts as text center

    Returns:
        (mean weighted pixel loss, text-region loss, center-region loss);
        a region loss is 0 when the corresponding region is empty.
    """
    gaussian_map = torch.sigmoid(gaussian_map)
    # hard 0/1 text mask, and gt-weighted (soft) masks for the two regions
    text_map = torch.where(gaussian_gt > text_thres, torch.ones_like(gaussian_gt), torch.zeros_like(gaussian_gt))
    center_gt = torch.where(gaussian_gt > center_thres, gaussian_gt, torch.zeros_like(gaussian_gt))
    text_gt = torch.where(gaussian_gt > text_thres, gaussian_gt, torch.zeros_like(gaussian_gt))
    bg_map = 1. - text_map
    # balance foreground/background contributions by inverse frequency
    pos_num = torch.sum(text_map)
    neg_num = torch.sum(bg_map)
    pos_weight = neg_num * 1. / (pos_num + neg_num)
    neg_weight = 1. - pos_weight
    # BUG FIX: the original passed reduce='none'. `reduce` is a deprecated
    # boolean flag, so the truthy string silently collapsed the loss to a
    # scalar, defeating the per-pixel weighting below. `reduction='none'`
    # keeps the elementwise loss map as intended.
    mse_loss = F.smooth_l1_loss(gaussian_map, gaussian_gt, reduction='none')
    weighted_mse_loss = mse_loss * (text_map * pos_weight + bg_map * neg_weight) * training_mask
    center_region_loss = torch.sum(center_gt * mse_loss * training_mask) / center_gt.sum() if center_gt.sum() > 0 else 0
    text_region_loss = torch.sum(text_gt * mse_loss * training_mask) / text_map.sum() if text_map.sum() > 0 else 0
    return weighted_mse_loss.mean(), text_region_loss, center_region_loss
def ohem_single(score, gt_text, training_mask, thres=0.2):
    """Online hard example mining for one score map.

    Keeps every positive pixel (gt above thres inside the training mask)
    plus the 3x hardest negatives (highest predicted scores), and returns
    the selection as a (1, H, W) float32 mask. Falls back to the training
    mask itself when there are no positives or no negatives.
    """
    fallback = training_mask.reshape(1, training_mask.shape[0], training_mask.shape[1]).astype('float32')
    n_pos = int(np.sum((gt_text > thres) & (training_mask > thres)))
    if n_pos == 0:
        return fallback
    n_neg = min(int(np.sum(gt_text <= thres)), n_pos * 3)
    if n_neg == 0:
        return fallback
    # cutoff = the n_neg-th highest score among negative pixels
    neg_scores = score[gt_text <= thres]
    cutoff = np.sort(neg_scores)[::-1][n_neg - 1]
    keep = ((score >= cutoff) | (gt_text > thres)) & (training_mask > thres)
    return keep.reshape(1, keep.shape[0], keep.shape[1]).astype('float32')
def ohem_batch(scores, gt_texts, training_masks, thres=0.2):
    """Run ohem_single per image and stack the results into a float tensor.

    scores is expected as (B, 1, H, W); gt_texts and training_masks as
    (B, H, W). Returns a (B, H, W) float tensor of selection masks.
    """
    scores_np = np.squeeze(scores.data.cpu().numpy(), axis=1)
    gts_np = gt_texts.data.cpu().numpy()
    masks_np = training_masks.data.cpu().numpy()
    per_image = [ohem_single(scores_np[i], gts_np[i], masks_np[i], thres)
                 for i in range(scores_np.shape[0])]
    return torch.from_numpy(np.concatenate(per_image, 0)).float()
def dice_loss(input, target, mask):
    """Masked soft-dice loss: 1 - mean dice coefficient over the batch.

    input is raw (pre-sigmoid); target and mask are multiplied in after
    flattening each sample to a vector. The 0.001 terms stabilise the
    division when a sample is empty.
    """
    batch = input.size()[0]
    probs = torch.sigmoid(input).contiguous().view(batch, -1)
    flat_target = target.contiguous().view(batch, -1)
    flat_mask = mask.contiguous().view(batch, -1)
    probs = probs * flat_mask
    flat_target = flat_target * flat_mask
    intersection = torch.sum(probs * flat_target, 1)
    denom_pred = torch.sum(probs * probs, 1) + 0.001
    denom_gt = torch.sum(flat_target * flat_target, 1) + 0.001
    dice = torch.mean((2 * intersection) / (denom_pred + denom_gt))
    return 1 - dice
def cal_text_score(texts, gt_texts, training_masks, running_metric_text, low_thres=0.05, high_thres=0.2):
    """Binarize prediction and gt with the given thresholds, update the
    running confusion-matrix metric and return its current scores.

    Pixels at or below low_thres become 0, at or above high_thres become 1;
    values in between truncate to 0 on the int cast.
    """
    masks_np = training_masks.data.cpu().numpy()
    pred = torch.sigmoid(texts[:, 0, :, :]).data.cpu().numpy() * masks_np
    pred[pred <= low_thres] = 0
    pred[pred >= high_thres] = 1
    pred = pred.astype(np.int32)
    gt = gt_texts.data.cpu().numpy() * masks_np
    gt[gt <= low_thres] = 0
    gt[gt >= high_thres] = 1
    gt = gt.astype(np.int32)
    running_metric_text.update(gt, pred)
    score, _ = running_metric_text.get_scores()
    return score
def gaussian_map_loss(gaussian_map, gt_texts, training_masks, criterion, text_thres, center_thres):
    """Combined regression + dice loss for a predicted gaussian text map.

    gaussian_map: raw (pre-sigmoid) predicted map
    gt_texts: ground-truth gaussian map
    training_masks: pixels allowed to contribute to the loss
    criterion: dice-style loss taking (pred, gt, selected_mask)
    text_thres / center_thres: gt thresholds for the text / center regions
    Returns (total loss, OHEM-selected pixel masks).
    """
    # weighted smooth-L1 terms: overall pixel loss, text region, center region
    weighted_mse_loss, mse_region_loss, loss_center = weighted_regression(gaussian_map, gt_texts, training_masks)
    # NOTE(review): center_gt is computed but never used in this function
    center_gt = torch.where(gt_texts > center_thres, gt_texts, torch.zeros_like(gt_texts))
    region_gt = torch.where(gt_texts > text_thres, gt_texts, torch.zeros_like(gt_texts))
    # loss for region_map
    select_masks = ohem_batch(torch.sigmoid(gaussian_map), region_gt, training_masks).cuda()
    loss_region_dice = criterion(gaussian_map, region_gt, select_masks)
    loss = loss_center + weighted_mse_loss + mse_region_loss + loss_region_dice
    return loss, select_masks
def smooth_l1_loss(input, target, thres=10):
    """Elementwise Huber-style loss: quadratic below `thres`, linear above."""
    diff = torch.abs(input - target)
    quadratic = 0.5 * diff ** 2
    linear = diff - 0.5
    return torch.where(diff < thres, quadratic, linear)
def train(train_loader, model, criterion, optimizer, epoch, summary_writer):
    """Run one training epoch.

    Args:
        train_loader: yields (imgs, gt_texts, training_masks, ori_imgs,
            border_map, geo_map_gt, densebox_gt) batches
        model: network returning (gaussian_map, border_map) predictions
        criterion: dice-style loss taking (pred, gt, selected_mask)
        optimizer: torch optimizer stepping the model parameters
        epoch: current epoch index (used for the tensorboard global_step)
        summary_writer: tensorboardX SummaryWriter

    Returns:
        (mean loss, last 'Mean Acc', last 'Mean IoU') for the epoch.
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    running_metric_text = runningScore(2)
    end = time.time()
    for batch_idx, (imgs, gt_texts, training_masks, ori_imgs, border_map, geo_map_gt, densebox_gt) in enumerate(train_loader):
        # skip degenerate all-black batches
        if ori_imgs.cpu().numpy().mean() == 0:
            continue
        data_time.update(time.time() - end)
        # BUG FIX: the stride subscripts below were mangled in the source
        # (an IPv6-looking token replaced them, which is not valid Python);
        # restored the intended stride-4 subsampling matching the model's
        # quarter-resolution outputs.
        imgs = Variable(imgs.cuda())
        gt_texts = Variable(gt_texts[:, ::4, ::4].cuda())
        training_masks = Variable(training_masks[:, ::4, ::4].cuda())
        border_map = Variable(border_map[:, ::4, ::4].cuda())
        densebox_map = Variable(densebox_gt[:, :, ::4, ::4].cuda())
        outputs = model(imgs)
        gaussian_large, border_map_pred = outputs
        large_map_loss, select_masks = gaussian_map_loss(gaussian_large, gt_texts, training_masks, criterion, text_thres=0.2, center_thres=0.7)
        # border map loss on OHEM-selected pixels plus the training mask
        select_border_masks = ohem_batch(torch.sigmoid(border_map_pred), border_map, training_masks, 0.5).cuda()
        loss_border = criterion(border_map_pred, border_map, select_border_masks + training_masks)
        loss = large_map_loss + loss_border
        losses.update(loss.item(), imgs.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # NOTE(review): both calls share one runningScore instance, so the
        # confusion matrix mixes center and border statistics; also the
        # _c/_t labels in the log below are swapped relative to these names.
        # Preserved as-is to keep reported metrics comparable.
        score_center = cal_text_score(gaussian_large, gt_texts, training_masks, running_metric_text, 0, 0.7)
        score_region = cal_text_score(border_map_pred, border_map, training_masks, running_metric_text, 0, 0.2)
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_idx % 40 == 0:
            # periodic tensorboard visualization and console logging
            global_step = epoch * len(train_loader) + batch_idx
            maps = torch.sigmoid(gaussian_large[0:1])
            center_map = torch.where(maps > 0.8, maps, torch.zeros_like(maps))
            text_map = torch.where(maps > 0.2, maps, torch.zeros_like(maps))
            summary_writer.add_images('gt/img', ori_imgs[0:1], global_step=global_step)
            summary_writer.add_images('gt/score_map0', torch.unsqueeze(gt_texts[0:1], 1), global_step=global_step)
            summary_writer.add_images('gt/training_mask', torch.unsqueeze(training_masks[0:1], 1), global_step=global_step)
            summary_writer.add_images('gt/selected_mask', torch.unsqueeze(select_masks[0:1], 1), global_step=global_step)
            summary_writer.add_images('gt/selected_border_mask', torch.unsqueeze(select_border_masks[0:1], 1), global_step=global_step)
            summary_writer.add_images('gt/border_map', torch.unsqueeze(border_map[0:1], 1), global_step=global_step)
            summary_writer.add_images('predicition/score_map', torch.sigmoid(gaussian_large[0:1]), global_step=global_step)
            summary_writer.add_images('predicition/center_map', torch.sigmoid(center_map[0:1]), global_step=global_step)
            summary_writer.add_images('predicition/region_map', torch.sigmoid(text_map[0:1]), global_step=global_step)
            summary_writer.add_images('predicition/border_map', torch.sigmoid(border_map_pred[0:1, :, :]), global_step=global_step)
            summary_writer.add_scalar('loss/border_loss', loss_border, global_step=global_step)
            summary_writer.add_scalar('loss/large_loss', large_map_loss, global_step=global_step)
            summary_writer.add_scalar('metric/acc_c', score_center['Mean Acc'], global_step=global_step)
            summary_writer.add_scalar('metric/iou_c', score_center['Mean IoU'], global_step=global_step)
            summary_writer.add_scalar('metric/acc_t', score_region['Mean Acc'], global_step=global_step)
            summary_writer.add_scalar('metric/iou_t', score_region['Mean IoU'], global_step=global_step)
            output_log = '({batch}/{size}) Batch: {bt:.3f}s | TOTAL: {total:.0f}min | ETA: {eta:.0f}min | Loss: {loss:.4f} | Acc_c: {acc_c: .4f} | IOU_c: {iou_c: .4f} | Acc_t: {acc_t: .4f} | IOU_t: {iou_t: .4f} '.format(
                batch=batch_idx + 1,
                size=len(train_loader),
                bt=batch_time.avg,
                total=batch_time.avg * batch_idx / 60.0,
                eta=batch_time.avg * (len(train_loader) - batch_idx) / 60.0,
                loss=losses.avg,
                acc_c=score_region['Mean Acc'],
                iou_c=score_region['Mean IoU'],
                acc_t=score_center['Mean Acc'],
                iou_t=score_center['Mean IoU'],
            )
            print(output_log)
            sys.stdout.flush()
    return (losses.avg, score_center['Mean Acc'], score_center['Mean IoU'])
def adjust_learning_rate(args, optimizer, epoch):
    """Decay the learning rate by 10x whenever *epoch* hits a milestone.

    Mutates both ``args.lr`` (so subsequent milestones keep decaying from the
    already-decayed value) and every ``param_group['lr']`` of *optimizer*
    in place.

    :param args: Namespace carrying ``lr`` (float) and ``schedule``
        (container of epoch indices at which to decay).
    :param optimizer: Optimizer whose ``param_groups`` learning rates are
        overwritten with the new ``args.lr``.
    :param epoch: Current epoch index.
    """
    # NOTE(review): the original declared ``global state`` but never read or
    # wrote ``state``; the dead declaration has been removed.
    if epoch in args.schedule:
        args.lr = args.lr * 0.1
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr
def save_checkpoint(state, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
    """Serialize *state* with ``torch.save`` into ``<checkpoint>/<filename>``."""
    target_path = os.path.join(checkpoint, filename)
    torch.save(state, target_path)
def main(args):
    """Train a PSENet-style text detector on ICDAR2015.

    Builds the checkpoint directory name, the data pipeline and the model,
    optionally restores weights from a pretrain/resume checkpoint, then runs
    the training loop for ``args.n_epoch`` epochs, saving a checkpoint after
    every epoch.

    :param args: Parsed CLI namespace (``arch``, ``batch_size``, ``n_epoch``,
        ``lr``, ``schedule``, ``img_size``, ``root_dir``, ``checkpoint``,
        ``pretrain``, ``resume``).
    """
    if args.checkpoint == '':
        args.checkpoint = "checkpoints/ic15_%s_bs_%d_ep_%d" % (args.arch, args.batch_size, args.n_epoch)
        if args.pretrain:
            if 'synth' in args.pretrain:
                args.checkpoint += "_pretrain_synth"
            else:
                args.checkpoint += "_pretrain_ic17"
    print('checkpoint path: %s' % args.checkpoint)
    print('init lr: %.8f' % args.lr)
    print('schedule: ', args.schedule)
    sys.stdout.flush()
    if not os.path.isdir(args.checkpoint):
        os.makedirs(args.checkpoint)

    kernel_num = 1
    min_scale = 0.4
    start_epoch = 0

    data_loader = IC15Loader(root_dir=args.root_dir, is_transform=True, img_size=args.img_size, kernel_num=kernel_num, min_scale=min_scale)
    train_loader = torch.utils.data.DataLoader(
        data_loader,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=2,
        drop_last=True,
        pin_memory=True)

    if args.arch == "resnet50":
        model = models.resnet50(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resnet101":
        model = models.resnet101(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resnet152":
        model = models.resnet152(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resnet18":
        model = models.resnet18(pretrained=True, num_classes=kernel_num)
    else:
        # BUGFIX: an unrecognized arch previously left ``model`` unbound and
        # crashed later with a confusing NameError; fail fast instead.
        raise ValueError("Unsupported architecture: %r" % args.arch)
    model = torch.nn.DataParallel(model).cuda()

    if hasattr(model.module, 'optimizer'):
        optimizer = model.module.optimizer
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.99, weight_decay=5e-4)

    title = 'icdar2015'
    if args.pretrain:
        # Warm-start: load weights only (non-strict, shapes may differ).
        print('Using pretrained model.')
        assert os.path.isfile(args.pretrain), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.pretrain)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.'])
    elif args.resume:
        # Full resume: restore weights, optimizer state and epoch counter.
        print('Resuming from checkpoint.')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        print('Training from scratch.')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.'])

    writer = SummaryWriter(logdir=args.checkpoint)
    for epoch in range(start_epoch, args.n_epoch):
        adjust_learning_rate(args, optimizer, epoch)
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.n_epoch, optimizer.param_groups[0]['lr']))
        train_loss, train_te_acc, train_te_iou = train(train_loader, model, dice_loss, optimizer, epoch, writer)
        # Checkpoint every epoch; the single file is overwritten each time.
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'lr': args.lr,
            'optimizer': optimizer.state_dict(),
        }, checkpoint=args.checkpoint)
    # logger.append([optimizer.param_groups[0]['lr'], train_loss, train_te_acc])
    # logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--arch', nargs='?', type=str, default='resnet50')
parser.add_argument('--img_size', nargs='?', type=int, default=640,
help='Height of the input image')
parser.add_argument('--n_epoch', nargs='?', type=int, default=50,
help='# of the epochs')
| |
"""Core implementation of path-based import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
BYTECODE_SUFFIXES = "['.pyc']"
DEBUG_BYTECODE_SUFFIXES = "['.pyc']"
EXTENSION_SUFFIXES = "['.cp38-win_amd64.pyd', '.pyd']"
class FileLoader(object):
    """Base file loader class which implements the loader protocol methods that
    require file system usage."""
    # NOTE(review): auto-generated introspection stub of
    # _frozen_importlib_external.FileLoader -- every body is a placeholder
    # ``pass``; signatures/docstrings mirror the real class but nothing here
    # is implemented.
    def contents(*args,**kw):
        pass
    def get_data(*args,**kw):
        """Return the data from path as raw bytes."""
        pass
    def get_filename(*args,**kw):
        """Return the path to the source file as found by the finder."""
        pass
    def get_resource_reader(*args,**kw):
        pass
    def is_resource(*args,**kw):
        pass
    def load_module(*args,**kw):
        """Load a module from a file.
        This method is deprecated. Use exec_module() instead.
        """
        pass
    def open_resource(*args,**kw):
        pass
    def resource_path(*args,**kw):
        pass
class _LoaderBasics(object):
    """Base class of common code needed by both SourceLoader and
    SourcelessFileLoader."""
    # NOTE(review): auto-generated stub -- all bodies are placeholders.
    def create_module(*args,**kw):
        """Use default semantics for module creation."""
        pass
    def exec_module(*args,**kw):
        """Execute the module."""
        pass
    def is_package(*args,**kw):
        """Concrete implementation of InspectLoader.is_package by checking if the path returned by get_filename has a filename of '__init__.py'."""
        pass
    def load_module(*args,**kw):
        """This module is deprecated."""
        pass
class SourceLoader(_LoaderBasics):
    """Auto-generated stub of _frozen_importlib_external.SourceLoader; all
    method bodies are placeholders."""
    def _cache_bytecode(*args,**kw):
        """Optional method which writes data (bytes) to a file path (a str).
        Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions
        """
        pass
    def create_module(*args,**kw):
        """Use default semantics for module creation."""
        pass
    def exec_module(*args,**kw):
        """Execute the module."""
        pass
    def get_code(*args,**kw):
        """Concrete implementation of InspectLoader.get_code.
        Reading of bytecode requires path_stats to be implemented. To write
        bytecode, set_data must also be implemented.
        """
        pass
    def get_source(*args,**kw):
        """Concrete implementation of InspectLoader.get_source."""
        pass
    def is_package(*args,**kw):
        """Concrete implementation of InspectLoader.is_package by checking if the path returned by get_filename has a filename of '__init__.py'."""
        pass
    def load_module(*args,**kw):
        """This module is deprecated."""
        pass
    def path_mtime(*args,**kw):
        """Optional method that returns the modification time (an int) for the specified path (a str).
        Raises OSError when the path cannot be handled.
        """
        pass
    def path_stats(*args,**kw):
        """Optional method returning a metadata dict for the specified path (a str).
        Possible keys:
        - 'mtime' (mandatory) is the numeric timestamp of last source
        code modification;
        - 'size' (optional) is the size in bytes of the source code.
        Implementing this method allows the loader to read bytecode files.
        Raises OSError when the path cannot be handled.
        """
        pass
    def set_data(*args,**kw):
        """Optional method which writes data (bytes) to a file path (a str).
        Implementing this method allows for the writing of bytecode files.
        """
        pass
    def source_to_code(*args,**kw):
        """Return the code object compiled from source.
        The 'data' argument can be any object type that compile() supports.
        """
        pass
class ExtensionFileLoader(FileLoader,_LoaderBasics):
    """Loader for extension modules.
    The constructor is designed to work with FileFinder.
    """
    # NOTE(review): auto-generated stub -- all bodies are placeholders.
    def contents(*args,**kw):
        pass
    def create_module(*args,**kw):
        """Create an unitialized extension module"""
        pass
    def exec_module(*args,**kw):
        """Initialize an extension module"""
        pass
    def get_code(*args,**kw):
        """Return None as an extension module cannot create a code object."""
        pass
    def get_data(*args,**kw):
        """Return the data from path as raw bytes."""
        pass
    def get_filename(*args,**kw):
        """Return the path to the source file as found by the finder."""
        pass
    def get_resource_reader(*args,**kw):
        pass
    def get_source(*args,**kw):
        """Return None as extension modules have no source code."""
        pass
    def is_package(*args,**kw):
        """Return True if the extension module is a package."""
        pass
    def is_resource(*args,**kw):
        pass
    def load_module(*args,**kw):
        """Load a module from a file.
        This method is deprecated. Use exec_module() instead.
        """
        pass
    def open_resource(*args,**kw):
        pass
    def resource_path(*args,**kw):
        pass
class FileFinder(object):
    """File-based finder.
    Interactions with the file system are cached for performance, being
    refreshed when the directory the finder is handling has been modified.
    """
    # NOTE(review): auto-generated stub; ``path_hook`` is the repr string of
    # the original bound method, not a callable.
    def _fill_cache(*args,**kw):
        """Fill the cache of potential modules and packages for this directory."""
        pass
    def _get_spec(*args,**kw):
        pass
    def find_loader(*args,**kw):
        """Try to find a loader for the specified module, or the namespace package portions. Returns (loader, list-of-portions).
        This method is deprecated. Use find_spec() instead.
        """
        pass
    def find_module(*args,**kw):
        """Try to find a loader for the specified module by delegating to self.find_loader().
        This method is deprecated in favor of finder.find_spec().
        """
        pass
    def find_spec(*args,**kw):
        """Try to find a spec for the specified module.
        Returns the matching spec, or None if not found.
        """
        pass
    def invalidate_caches(*args,**kw):
        """Invalidate the directory mtime."""
        pass
    path_hook = "<bound method FileFinder.path_hook of <class '_frozen_importlib_external.FileFinder'>>"
MAGIC_NUMBER = "b'U\r\r\n'"
OPTIMIZED_BYTECODE_SUFFIXES = "['.pyc']"
class PathFinder(object):
    """Meta path finder for sys.path and package __path__ attributes."""
    # NOTE(review): auto-generated stub; every attribute below holds the repr
    # string of the original bound method, not a callable.
    _get_spec = "<bound method PathFinder._get_spec of <class '_frozen_importlib_external.PathFinder'>>"
    _legacy_get_spec = "<bound method PathFinder._legacy_get_spec of <class '_frozen_importlib_external.PathFinder'>>"
    _path_hooks = "<bound method PathFinder._path_hooks of <class '_frozen_importlib_external.PathFinder'>>"
    _path_importer_cache = "<bound method PathFinder._path_importer_cache of <class '_frozen_importlib_external.PathFinder'>>"
    find_distributions = "<bound method PathFinder.find_distributions of <class '_frozen_importlib_external.PathFinder'>>"
    find_module = "<bound method PathFinder.find_module of <class '_frozen_importlib_external.PathFinder'>>"
    find_spec = "<bound method PathFinder.find_spec of <class '_frozen_importlib_external.PathFinder'>>"
    invalidate_caches = "<bound method PathFinder.invalidate_caches of <class '_frozen_importlib_external.PathFinder'>>"
SOURCE_SUFFIXES = "['.py', '.pyw']"
class SourceFileLoader(FileLoader,SourceLoader):
    """Concrete implementation of SourceLoader using the file system."""
    # NOTE(review): auto-generated stub -- all bodies are placeholders.
    def _cache_bytecode(*args,**kw):
        pass
    def contents(*args,**kw):
        pass
    def create_module(*args,**kw):
        """Use default semantics for module creation."""
        pass
    def exec_module(*args,**kw):
        """Execute the module."""
        pass
    def get_code(*args,**kw):
        """Concrete implementation of InspectLoader.get_code.
        Reading of bytecode requires path_stats to be implemented. To write
        bytecode, set_data must also be implemented.
        """
        pass
    def get_data(*args,**kw):
        """Return the data from path as raw bytes."""
        pass
    def get_filename(*args,**kw):
        """Return the path to the source file as found by the finder."""
        pass
    def get_resource_reader(*args,**kw):
        pass
    def get_source(*args,**kw):
        """Concrete implementation of InspectLoader.get_source."""
        pass
    def is_package(*args,**kw):
        """Concrete implementation of InspectLoader.is_package by checking if the path returned by get_filename has a filename of '__init__.py'."""
        pass
    def is_resource(*args,**kw):
        pass
    def load_module(*args,**kw):
        """Load a module from a file.
        This method is deprecated. Use exec_module() instead.
        """
        pass
    def open_resource(*args,**kw):
        pass
    def path_mtime(*args,**kw):
        """Optional method that returns the modification time (an int) for the specified path (a str).
        Raises OSError when the path cannot be handled.
        """
        pass
    def path_stats(*args,**kw):
        """Return the metadata for the path."""
        pass
    def resource_path(*args,**kw):
        pass
    def set_data(*args,**kw):
        """Write bytes data to a file."""
        pass
    def source_to_code(*args,**kw):
        """Return the code object compiled from source.
        The 'data' argument can be any object type that compile() supports.
        """
        pass
class SourcelessFileLoader(FileLoader,_LoaderBasics):
    """Loader which handles sourceless file imports."""
    # NOTE(review): auto-generated stub -- all bodies are placeholders.
    def contents(*args,**kw):
        pass
    def create_module(*args,**kw):
        """Use default semantics for module creation."""
        pass
    def exec_module(*args,**kw):
        """Execute the module."""
        pass
    def get_code(*args,**kw):
        pass
    def get_data(*args,**kw):
        """Return the data from path as raw bytes."""
        pass
    def get_filename(*args,**kw):
        """Return the path to the source file as found by the finder."""
        pass
    def get_resource_reader(*args,**kw):
        pass
    def get_source(*args,**kw):
        """Return None as there is no source code."""
        pass
    def is_package(*args,**kw):
        """Concrete implementation of InspectLoader.is_package by checking if the path returned by get_filename has a filename of '__init__.py'."""
        pass
    def is_resource(*args,**kw):
        pass
    def load_module(*args,**kw):
        """Load a module from a file.
        This method is deprecated. Use exec_module() instead.
        """
        pass
    def open_resource(*args,**kw):
        pass
    def resource_path(*args,**kw):
        pass
class WindowsRegistryFinder(object):
    """Meta path finder for modules declared in the Windows registry."""
    # NOTE(review): auto-generated stub; the method attributes are repr
    # strings of the original bound methods, not callables.
    DEBUG_BUILD = False
    REGISTRY_KEY = """Software\Python\PythonCore\{sys_version}\Modules\{fullname}"""
    REGISTRY_KEY_DEBUG = """Software\Python\PythonCore\{sys_version}\Modules\{fullname}\Debug"""
    _open_registry = "<bound method WindowsRegistryFinder._open_registry of <class '_frozen_importlib_external.WindowsRegistryFinder'>>"
    _search_registry = "<bound method WindowsRegistryFinder._search_registry of <class '_frozen_importlib_external.WindowsRegistryFinder'>>"
    find_module = "<bound method WindowsRegistryFinder.find_module of <class '_frozen_importlib_external.WindowsRegistryFinder'>>"
    find_spec = "<bound method WindowsRegistryFinder.find_spec of <class '_frozen_importlib_external.WindowsRegistryFinder'>>"
_CASE_INSENSITIVE_PLATFORMS = "('cygwin', 'darwin', 'win')"
_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = "('cygwin', 'darwin')"
_CASE_INSENSITIVE_PLATFORMS_STR_KEY = "('win',)"
class _NamespaceLoader(object):
    """Auto-generated stub of _frozen_importlib_external._NamespaceLoader;
    all method bodies are placeholders."""
    def create_module(*args,**kw):
        """Use default semantics for module creation."""
        pass
    def exec_module(*args,**kw):
        pass
    def get_code(*args,**kw):
        pass
    def get_source(*args,**kw):
        pass
    def is_package(*args,**kw):
        pass
    def load_module(*args,**kw):
        """Load a namespace module.
        This method is deprecated. Use exec_module() instead.
        """
        pass
    module_repr = "<bound method _NamespaceLoader.module_repr of <class '_frozen_importlib_external._NamespaceLoader'>>"
class _NamespacePath(object):
    """Represents a namespace package's path. It uses the module name
    to find its parent module, and from there it looks up the parent's
    __path__. When this changes, the module's own path is recomputed,
    using path_finder. For top-level modules, the parent module's path
    is sys.path."""
    # NOTE(review): auto-generated stub -- all bodies are placeholders.
    def __contains__(*args,**kw):
        pass
    def __getitem__(*args,**kw):
        pass
    def __setitem__(*args,**kw):
        pass
    def _find_parent_path_names(*args,**kw):
        """Returns a tuple of (parent-module-name, parent-path-attr-name)"""
        pass
    def _get_parent_path(*args,**kw):
        pass
    def _recalculate(*args,**kw):
        pass
    def append(*args,**kw):
        pass
_OPT = """opt-"""
_POPULATE = "<object object at 0x000001AE5B0C40C0>"
_PYCACHE = """__pycache__"""
_RAW_MAGIC_NUMBER = 168627541
_bootstrap = "<module | |
import base64
import binascii
import collections
import copy
import json
import logging
import os
import traceback
from abc import abstractmethod, ABCMeta
from typing import Tuple
import requests
import yaml
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError
from RPA.core.helpers import import_by_name, required_env
from .utils import url_join, resolve_path
class RobocorpVaultError(RuntimeError):
    """Raised when there's a problem with reading from Robocorp Vault."""
class Secret(collections.abc.Mapping):
    """Container for a secret with name, description, and
    multiple key-value pairs. Avoids logging internal values
    when possible: ``repr`` exposes only the key names, never the values.

    Note that despite implementing the read-only ``Mapping`` interface,
    instances can be mutated through ``update`` and item assignment.

    :param name: Name of secret
    :param description: Human-friendly description for secret
    :param values: Dictionary of key-value pairs stored in secret
    """

    def __init__(self, name, description, values):
        self._name = name
        self._desc = description
        # BUGFIX: pass the mapping positionally instead of unpacking with
        # ``**`` -- ``OrderedDict(**values)`` raises TypeError for any
        # non-string key (e.g. integer keys coming from a YAML file), while
        # ``OrderedDict(values)`` preserves every hashable key.
        self._dict = collections.OrderedDict(values)

    @property
    def name(self):
        """Name of the secret."""
        return self._name

    @property
    def description(self):
        """Human-friendly description of the secret."""
        return self._desc

    def update(self, kvpairs):
        """Merge the given key-value pairs into the secret."""
        self._dict.update(kvpairs)

    def __getitem__(self, key):
        return self._dict[key]

    def __setitem__(self, key, value):
        self._dict[key] = value

    def __contains__(self, key):
        return key in self._dict

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        # Deliberately shows keys only so values never leak into logs.
        return "Secret(name={name}, keys=[{keys}])".format(
            name=self.name, keys=", ".join(str(key) for key in self.keys())
        )
class BaseSecretManager(metaclass=ABCMeta):
    """Abstract class for secrets management. Should be used as a
    base-class for any adapter implementation.
    """

    @abstractmethod
    def get_secret(self, secret_name):
        """Return ``Secret`` object with given name.

        Concrete adapters signal a missing secret in adapter-specific ways
        (e.g. KeyError or a vault error) -- see each implementation.
        """

    @abstractmethod
    def set_secret(self, secret: Secret):
        """Set a secret with a new value."""
class FileSecrets(BaseSecretManager):
    """Adapter for secrets stored in a database file. Supports only
    plaintext secrets, and should be used mainly for debugging.

    The path to the secrets file can be set with the
    environment variable ``RPA_SECRET_FILE``, or as
    an argument to the library.

    The format of the secrets file should be one of the following:

    .. code-block:: JSON

        {
            "name1": {
                "key1": "value1",
                "key2": "value2"
            },
            "name2": {
                "key1": "value1"
            }
        }

    OR

    .. code-block:: YAML

        name1:
            key1: value1
            key2: value2
        name2:
            key1: value1
    """

    # Maps supported file extensions to (loader, dumper) callables.
    SERIALIZERS = {
        ".json": (json.load, json.dump),
        ".yaml": (yaml.full_load, yaml.dump),
    }

    def __init__(self, secret_file="secrets.json"):
        self.logger = logging.getLogger(__name__)
        path = required_env("RPA_SECRET_FILE", secret_file)
        self.logger.info("Resolving path: %s", path)
        self.path = resolve_path(path)
        extension = self.path.suffix
        serializer = self.SERIALIZERS.get(extension)
        # NOTE(cmin764): This will raise instead of returning an empty secrets object
        # because it is wrong starting from the "env.json" configuration level.
        if not serializer:
            raise ValueError(
                f"Not supported local vault secrets file extension {extension!r}"
            )
        self._loader, self._dumper = serializer
        self.data = self.load()

    def load(self):
        """Load the secrets file and return its content as a dict.

        Best-effort: returns an empty dict when the file is missing or
        malformed (JSON decode errors are ValueError subclasses).
        """
        # NOTE(review): YAML parse failures raise yaml.YAMLError, which is not
        # IOError/ValueError and therefore propagates -- confirm whether that
        # is intended before widening the except clause.
        try:
            with open(self.path, encoding="utf-8") as fd:
                data = self._loader(fd)
                if not isinstance(data, dict):
                    raise ValueError("Invalid content format")
                return data
        except (IOError, ValueError) as err:
            self.logger.error("Failed to load secrets file: %s", err)
            return {}

    def save(self):
        """Save the secrets content to disk.

        Validates the in-memory content *before* opening the file: opening
        with mode ``"w"`` truncates immediately, so validating afterwards
        (as the previous version did) could wipe an existing secrets file
        on bad data.
        """
        try:
            if not isinstance(self.data, dict):
                raise ValueError("Invalid content format")
            with open(self.path, "w", encoding="utf-8") as f:
                self._dumper(self.data, f, indent=4)
        except (IOError, ValueError) as err:
            self.logger.error("Failed to save secrets file: %s", err)

    def get_secret(self, secret_name):
        """Get secret defined with given name from file.

        :param secret_name: Name of secret to fetch
        :returns: Secret object
        :raises KeyError: No secret with given name
        """
        values = self.data.get(secret_name)
        if values is None:
            raise KeyError(f"Undefined secret: {secret_name}")
        return Secret(secret_name, "", values)

    def set_secret(self, secret: Secret) -> None:
        """Set the secret value in the local Vault
        with the given ``Secret`` object.

        :param secret: A ``Secret`` object.
        :raises IOError, ValueError: Writing the local vault failed.
        """
        self.data[secret.name] = dict(secret)
        self.save()
class RobocorpVault(BaseSecretManager):
    """Adapter for secrets stored in Robocorp Vault.

    The following environment variables should exist:

    - RC_API_SECRET_HOST: URL to Robocorp Secrets API
    - RC_API_SECRET_TOKEN: API token with access to Robocorp Secrets API
    - RC_WORKSPACE_ID: Robocorp Workspace ID
    """

    ENCRYPTION_SCHEME = "robocloud-vault-transit-v2"

    def __init__(self, *args, **kwargs):
        # pylint: disable=unused-argument
        self.logger = logging.getLogger(__name__)
        # Environment variables set by runner
        self._host = required_env("RC_API_SECRET_HOST")
        self._token = required_env("RC_API_SECRET_TOKEN")
        self._workspace = required_env("RC_WORKSPACE_ID")
        # Generated lazily on request (see _private_key / _public_bytes)
        self.__private_key = None
        self.__public_bytes = None

    @property
    def headers(self):
        """Default request headers."""
        return {"Authorization": f"Bearer {self._token}"}

    @property
    def params(self):
        """Default request parameters."""
        return {
            "encryptionScheme": self.ENCRYPTION_SCHEME,
            "publicKey": self._public_bytes,
        }

    @property
    def _private_key(self):
        """Cryptography private key object (generated once, lazily)."""
        if self.__private_key is None:
            self.__private_key = rsa.generate_private_key(
                public_exponent=65537, key_size=4096, backend=default_backend()
            )
        return self.__private_key

    @property
    def _public_bytes(self):
        """Serialized, base64-encoded DER public key bytes."""
        if self.__public_bytes is None:
            self.__public_bytes = base64.b64encode(
                self._private_key.public_key().public_bytes(
                    encoding=serialization.Encoding.DER,
                    format=serialization.PublicFormat.SubjectPublicKeyInfo,
                )
            )
        return self.__public_bytes

    def create_secret_url(self, name):
        """Create a URL for a specific secret."""
        return url_join(
            self._host, "secrets-v1", "workspaces", self._workspace, "secrets", name
        )

    def create_public_key_url(self):
        """Create a URL for encryption public key."""
        return url_join(
            self._host,
            "secrets-v1",
            "workspaces",
            self._workspace,
            "secrets",
            "publicKey",
        )

    def get_secret(self, secret_name):
        """Get secret defined with given name from Robocorp Vault.

        :param secret_name: Name of secret to fetch
        :returns: Secret object
        :raises RobocorpVaultError: Error with API request or response payload
        """
        url = self.create_secret_url(secret_name)
        try:
            response = requests.get(url, headers=self.headers, params=self.params)
            response.raise_for_status()
            payload = response.json()
            payload = self._decrypt_payload(payload)
        except InvalidTag as e:
            self.logger.debug(traceback.format_exc())
            raise RobocorpVaultError("Failed to validate authentication tag") from e
        except Exception as exc:  # pylint: disable=broad-except
            self.logger.debug(traceback.format_exc())
            raise RobocorpVaultError from exc
        return Secret(payload["name"], payload["description"], payload["values"])

    def _decrypt_payload(self, payload):
        """Decrypt the AES-GCM-encrypted ``value`` field of a vault response
        and return the payload with a decoded ``values`` dict instead."""
        payload = copy.deepcopy(payload)
        fields = payload.pop("encryption", None)
        if fields is None:
            raise KeyError("Missing encryption fields from response")
        scheme = fields["encryptionScheme"]
        if scheme != self.ENCRYPTION_SCHEME:
            raise ValueError(f"Unexpected encryption scheme: {scheme}")
        aes_enc = base64.b64decode(fields["encryptedAES"])
        aes_tag = base64.b64decode(fields["authTag"])
        aes_iv = base64.b64decode(fields["iv"])
        # Decrypt AES key using our private key
        aes_key = self._private_key.decrypt(
            aes_enc,
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA256()),
                algorithm=hashes.SHA256(),
                label=None,
            ),
        )
        # Decrypt actual value using decrypted AES key. The auth tag must be
        # appended to the ciphertext for the cryptography AESGCM API.
        # NOTE(review): the nonce here is the *hex encoding* of the IV, while
        # set_secret encrypts with the raw IV -- this asymmetry presumably
        # matches the server-side scheme; confirm against the vault API docs
        # before changing.
        ciphertext = base64.b64decode(payload.pop("value")) + aes_tag
        data = AESGCM(aes_key).decrypt(binascii.hexlify(aes_iv), ciphertext, b"")
        payload["values"] = json.loads(data)
        return payload

    def set_secret(self, secret: Secret) -> None:
        """Set the secret value in the Vault. Note that the secret possibly
        consists of multiple key-value pairs, which will all be overwritten
        with the values given here. So don't try to update only one item
        of the secret, update all of them.

        :param secret: A ``Secret`` object
        :raises RobocorpVaultError: Request failed or was rejected.
        """
        value, aes_iv, aes_key, aes_tag = self._encrypt_secret_value_with_aes(secret)
        pub_key = self.get_publickey()
        aes_enc = self._encrypt_aes_key_with_public_rsa(aes_key, pub_key)
        payload = {
            "description": secret.description,
            "encryption": {
                "authTag": aes_tag.decode(),
                "encryptedAES": aes_enc.decode(),
                "encryptionScheme": self.ENCRYPTION_SCHEME,
                "iv": aes_iv.decode(),
            },
            "name": secret.name,
            "value": value.decode(),
        }
        url = self.create_secret_url(secret.name)
        # BUGFIX: ``response`` must be pre-bound -- if ``requests.put`` itself
        # raises (e.g. a connection error), the old handler crashed with
        # NameError on ``response.status_code``, masking the real failure.
        response = None
        try:
            response = requests.put(url, headers=self.headers, json=payload)
            response.raise_for_status()
        except Exception as e:  # pylint: disable=broad-except
            self.logger.debug(traceback.format_exc())
            if response is not None and response.status_code == 403:
                raise RobocorpVaultError(
                    "Failed to set secret value. Does your token have write access?"
                ) from e
            raise RobocorpVaultError("Failed to set secret value.") from e

    def get_publickey(self) -> bytes:
        """Get the public key for AES encryption with the existing token."""
        url = self.create_public_key_url()
        try:
            response = requests.get(url, headers=self.headers)
            response.raise_for_status()
        except Exception as e:  # pylint: disable=broad-except
            self.logger.debug(traceback.format_exc())
            raise RobocorpVaultError(
                "Failed to fetch public key. Is your token valid?"
            ) from e
        return response.content

    @staticmethod
    def _encrypt_secret_value_with_aes(
        secret: Secret,
    ) -> Tuple[bytes, bytes, bytes, bytes]:
        """Encrypt the secret's values with a fresh AES-GCM key.

        :returns: (b64 ciphertext, b64 IV, raw AES key, b64 auth tag)
        """
        def generate_aes_key() -> Tuple[bytes, bytes]:
            aes_key = AESGCM.generate_key(bit_length=256)
            aes_iv = os.urandom(16)
            return aes_key, aes_iv

        def split_auth_tag_from_encrypted_value(
            encrypted_value: bytes,
        ) -> Tuple[bytes, bytes]:
            """AES auth tag is the last 16 bytes of the AES encrypted value.
            Split the tag from the value, as that is required for the API.
            """
            aes_tag = encrypted_value[-16:]
            trimmed_encrypted_value = encrypted_value[:-16]
            return trimmed_encrypted_value, aes_tag

        value = json.dumps(dict(secret)).encode()
        aes_key, aes_iv = generate_aes_key()
        encrypted_value = AESGCM(aes_key).encrypt(aes_iv, value, b"")
        encrypted_value, aes_tag = split_auth_tag_from_encrypted_value(encrypted_value)
        return (
            base64.b64encode(encrypted_value),
            base64.b64encode(aes_iv),
            aes_key,
            base64.b64encode(aes_tag),
        )

    @staticmethod
    def _encrypt_aes_key_with_public_rsa(aes_key: bytes, public_rsa: bytes) -> bytes:
        """RSA-OAEP-encrypt the AES key with the vault's DER public key."""
        pub_decoded = base64.b64decode(public_rsa)
        public_key = serialization.load_der_public_key(pub_decoded)
        aes_enc = public_key.encrypt(
            aes_key,
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA256()),
                algorithm=hashes.SHA256(),
                label=None,
            ),
        )
        return base64.b64encode(aes_enc)
class Vault:
"""`Vault` is a library for interacting with secrets stored in Robocorp
Vault (by default) or file-based secrets, which can be taken into use
by setting some environment variables.
Robocorp Vault works together with Robocorp Worker or Robocorp CLI (RCC).
The following three environment variables need to exist, and are set by
Robocorp Worker automatically and can be set manually with Robocorp CLI.
- ``RC_API_SECRET_HOST``: URL to Robocorp Vault API
- ``RC_API_SECRET_TOKEN``: API Token for Robocorp Vault API
- ``RC_WORKSPACE_ID``: Control Room Workspace ID
File-based secrets can be set by defining two environment variables.
- ``RPA_SECRET_MANAGER``: RPA.Robocorp.Vault.FileSecrets
- ``RPA_SECRET_FILE``: Absolute path to the secrets database file
Example content of local secrets file:
.. code-block:: json
{
"swaglabs": {
"username": "standard_user",
"password": "<PASSWORD>"
}
}
OR
.. code-block:: YAML
swaglabs:
username: standard_user
| |
self.assertEqual(article.keywords(iso_format='iso 639-2'), None)
def test_keywords_without_subfield_k(self):
article = self.article
article.data['article']['v85'] = [{
u"i": u"1",
u"d": u"nd",
u"_": u"",
u"l": u"en"
}]
self.assertEqual(article.keywords(iso_format='iso 639-2'), None)
def test_keywords_without_subfield_l(self):
article = self.article
article.data['article']['v85'] = [{
u"i": u"1",
u"d": u"nd",
u"_": u"",
u"k": u"keyword"
}]
self.assertEqual(article.keywords(iso_format='iso 639-2'), None)
def test_keywords_with_undefined_language(self):
article = self.article
article.data['article']['v85'] = [{
u"i": u"1",
u"d": u"nd",
u"_": u"",
u"k": u"keyword",
u"l": u"xx"
}]
expected = {u'#undefined xx#': [u'keyword']}
self.assertEqual(article.keywords(iso_format='iso 639-2'), expected)
    def test_keywords(self):
        # Uses the unmodified fixture record: v85 carries Portuguese and
        # English keyword groups, which keywords() maps to ISO 639-2 codes.
        article = self.article
        expected = {'por': [u'Dojo',
                            u'esp\xe9cies ex\xf3ticas',
                            u'maturidade sexual',
                            u'sobreposi\xe7\xe3o de dieta',
                            u'Estado de S\xe3o Paulo'],
                    'eng': [u'Oriental weatherfish',
                            u'exotic species',
                            u'sexual maturity',
                            u'diet overlap',
                            u'S\xe3o Paulo State']
                    }
        self.assertEqual(article.keywords(iso_format='iso 639-2'), expected)
def test_keywords_iso639_2(self):
article = self.article
article.data['article']['v85'] = [
{
"i": "1",
"k": "keyword",
"t": "m",
"_": "",
"l": "en"
},
{
"i": "1",
"k": "palavra-chave",
"t": "m",
"_": "",
"l": "pt"
},
]
expected = {u'pt': [u'palavra-chave'], u'en': [u'keyword']}
self.assertEqual(article.keywords(iso_format=None), expected)
def test_without_citations(self):
article = self.article
del(article.data['citations'])
self.assertEqual(article.citations, None)
def test_translated_titles_without_v12(self):
article = self.article
del(article.data['article']['v12'])
self.assertEqual(article.translated_titles(), None)
    def test_translated_titles_iso639_2(self):
        # With iso_format='iso 639-2' the two-letter language codes in v12
        # are converted to three-letter codes; only non-original-language
        # titles are returned (the 'en' entry is the original).
        article = self.article
        article.data['article']['v12'] = [
            {
                u"l": u"en",
                u"_": u"Article Title"
            },
            {
                u"l": u"pt",
                u"_": u"Título do Artigo"
            }
        ]
        expected = {u'por': u'Título do Artigo'}
        self.assertEqual(article.translated_titles(iso_format='iso 639-2'), expected)
def test_translated_titles(self):
article = self.article
article.data['article']['v12'] = [
{
u"l": u"en",
u"_": u"Article Title"
},
{
u"l": u"pt",
u"_": u"Título do Artigo"
}
]
expected = {u'pt': u'Título do Artigo'}
self.assertEqual(article.translated_titles(iso_format=None), expected)
def test_translated_abstracts_without_v83(self):
article = self.article
del(article.data['article']['v83'])
self.assertEqual(article.translated_abstracts(iso_format=None), None)
def test_translated_abtracts_iso639_2(self):
article = self.article
article.data['article']['v83'] = [
{
u"l": u"en",
u"a": u"Article Abstract"
},
{
u"l": u"pt",
u"a": u"Resumo do Artigo"
}
]
expected = {u'por': u'Resumo do Artigo'}
self.assertEqual(article.translated_abstracts(iso_format='iso 639-2'), expected)
def test_translated_abstracts(self):
article = self.article
article.data['article']['v83'] = [
{
u"l": u"en",
u"a": u"Article Abstract"
},
{
u"l": u"pt",
u"a": u"Resumo do Artigo"
}
]
expected = {u'pt': u'Resumo do Artigo'}
self.assertEqual(article.translated_abstracts(iso_format=None), expected)
def test_thesis_degree(self):
self.fulldoc['article']['v51'] = [{u'_': u'Degree 1'}]
article = Article(self.fulldoc)
self.assertEqual(article.thesis_degree, u'Degree 1')
def test_without_thesis_degree(self):
article = Article(self.fulldoc)
self.assertEqual(article.thesis_degree, None)
def test_thesis_organization(self):
self.fulldoc['article']['v52'] = [{u'_': u'It is the thesis organization'}]
article = Article(self.fulldoc)
self.assertEqual(article.thesis_organization, [{u'name': u'It is the thesis organization'}])
def test_thesis_organization_and_division(self):
self.fulldoc['article']['v52'] = [{u'_': u'It is the thesis organization', u'd': u'divisão 1'}]
article = Article(self.fulldoc)
self.assertEqual(article.thesis_organization, [{u'name': u'It is the thesis organization',
u'division': u'divisão 1'}])
def test_thesis_organization_without_name(self):
self.fulldoc['article']['v52'] = [{u'd': u'divisão 1'}]
article = Article(self.fulldoc)
self.assertEqual(article.thesis_organization, [{u'division': u'divisão 1'}])
def test_without_thesis_organization(self):
article = Article(self.fulldoc)
self.assertEqual(article.thesis_organization, None)
@unittest.skip
def test_citations(self):
article = self.article
article.data['citations']
#self.assertTrue(article.citations, Citations)
class CitationTest(unittest.TestCase):
def setUp(self):
path = os.path.dirname(os.path.realpath(__file__))
self.json_citation = json.loads(open('%s/fixtures/sample_citation.json' % path).read())
self.citation = Citation(self.json_citation)
def test_index_number(self):
    # v701 in the fixture record carries the citation's 1-based position.
    citation = self.citation
    self.assertEqual(citation.index_number, 1)

def test_without_index_number(self):
    # Removing v701 must yield None rather than raising KeyError.
    citation = self.citation
    del(citation.data['v701'])
    self.assertEqual(citation.index_number, None)
# publication_type inference rules exercised below:
#   v30 (journal title) + v12 (article title) -> 'article'
#   v18 (book title)                          -> 'book'
#   v53 (conference title)                    -> 'conference'
#   v18 + v45 (defense date)                  -> 'thesis'
#   v37 (URL) + v12                           -> 'link'
#   none of the above                         -> 'undefined'
def test_publication_type_article(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.publication_type, u'article')

def test_publication_type_book(self):
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the book title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.publication_type, u'book')

def test_publication_type_conference(self):
    json_citation = {}
    json_citation['v53'] = [{u'_': u'It is the conference title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.publication_type, u'conference')

def test_publication_type_thesis(self):
    # v45 (a defense date) promotes a book-shaped record to 'thesis'.
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the thesis title'}]
    json_citation['v45'] = [{u'_': u'20120000'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.publication_type, u'thesis')

def test_publication_type_link(self):
    json_citation = {}
    json_citation['v37'] = [{u'_': u'http://www.scielo.br'}]
    json_citation['v12'] = [{u'_': u'It is the link title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.publication_type, u'link')

def test_publication_type_undefined(self):
    # An empty record cannot be classified.
    json_citation = {}
    citation = Citation(json_citation)
    self.assertEqual(citation.publication_type, u'undefined')
# 'source' is the container title: v30 for journal articles, v18 for books.
def test_source_journal(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.source, u'It is the journal title')

def test_source_journal_without_journal_title(self):
    # v12 alone has no container, so source is None.
    json_citation = {}
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.source, None)

def test_source_book_title(self):
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the book title'}]
    json_citation['v12'] = [{u'_': u'It is the book chapter'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.source, u'It is the book title')

# article_title / chapter_title both come from v12; which property is
# populated depends on the container (v30 journal vs v18 book).
def test_article_title(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article chapter'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.article_title, u'It is the article chapter')

def test_article_without_title(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.article_title, None)

def test_book_chapter_title(self):
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the book title'}]
    json_citation['v12'] = [{u'_': u'It is the book chapter'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.chapter_title, u'It is the book chapter')

def test_book_without_chapter_title(self):
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the book title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.chapter_title, None)
# Type-specific title properties: each returns v18/v53/v12 only when the
# record classifies as the matching publication type, else None.
def test_thesis_title(self):
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the thesis title'}]
    json_citation['v45'] = [{u'_': u'20120000'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.thesis_title, u'It is the thesis title')

def test_thesis_without_title(self):
    json_citation = {}
    citation = Citation(json_citation)
    self.assertEqual(citation.thesis_title, None)

def test_conference_title(self):
    json_citation = {}
    json_citation['v53'] = [{u'_': u'It is the conference title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.conference_title, u'It is the conference title')

def test_conference_without_title(self):
    json_citation = {}
    citation = Citation(json_citation)
    self.assertEqual(citation.conference_title, None)

def test_link_title(self):
    json_citation = {}
    json_citation['v37'] = [{u'_': u'http://www.scielo.br'}]
    json_citation['v12'] = [{u'_': u'It is the link title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.link_title, u'It is the link title')

def test_link_without_title(self):
    json_citation = {}
    json_citation['v37'] = [{u'_': u'http://www.scielo.br'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.link_title, None)

def test_conference_sponsor(self):
    # v52 is the sponsor, exposed only when v53 marks a conference.
    json_citation = {}
    json_citation['v53'] = [{u'_': u'It is the conference title'}]
    json_citation['v52'] = [{u'_': u'It is the conference sponsor'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.conference_sponsor, u'It is the conference sponsor')

def test_conference_without_sponsor(self):
    # v52 without v53 is not a conference sponsor.
    json_citation = {}
    json_citation['v52'] = [{u'_': u'It is the conference sponsor'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.conference_sponsor, None)
def test_link(self):
    # v37 carries the raw URL.
    json_citation = {}
    json_citation['v37'] = [{u'_': u'http://www.scielo.br'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.link, u'http://www.scielo.br')

def test_without_link(self):
    json_citation = {}
    citation = Citation(json_citation)
    self.assertEqual(citation.link, None)

def test_date(self):
    # v65 is the publication date.
    json_citation = {}
    json_citation['v65'] = [{u'_': u'2012'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.date, u'2012')

def test_a_link_access_date(self):
    # For link citations the v110 access date (2013) takes precedence
    # over the v65 publication date (2012).
    json_citation = {}
    json_citation['v37'] = [{u'_': u'http://www.scielo.br'}]
    json_citation['v110'] = [{u'_': u'201300'}]
    json_citation['v65'] = [{u'_': u'2012'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.date, u'2013')

def test_without_date(self):
    json_citation = {}
    citation = Citation(json_citation)
    self.assertEqual(citation.date, None)
# v63 (edition) is only meaningful for books and conferences; v35 (ISSN)
# only for journal articles.
def test_book_edition(self):
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the book title'}]
    json_citation['v63'] = [{u'_': u'ed. 1'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.edition, u'ed. 1')

def test_conference_edition(self):
    json_citation = {}
    json_citation['v53'] = [{u'_': u'It is the conference title'}]
    json_citation['v63'] = [{u'_': u'ed. 1'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.edition, u'ed. 1')

def test_invalid_edition(self):
    # v63 on a non-book/non-conference record is ignored.
    json_citation = {}
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    json_citation['v63'] = [{u'_': u'ed. 1'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.edition, None)

def test_without_edition(self):
    json_citation = {}
    citation = Citation(json_citation)
    self.assertEqual(citation.edition, None)

def test_issn(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    json_citation['v35'] = [{u'_': u'1234-1234'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.issn, u'1234-1234')

def test_issn_but_not_an_article(self):
    json_citation = {}
    json_citation['v35'] = [{u'_': u'1234-1234'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.issn, None)
# v69 (ISBN) only applies to books; v31 (volume) to journal articles and
# books; v32 (issue) to journal articles.
def test_isbn(self):
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the book title'}]
    json_citation['v12'] = [{u'_': u'It is the chapter title'}]
    json_citation['v69'] = [{u'_': u'12341234'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.isbn, u'12341234')

def test_isbn_but_not_a_book(self):
    json_citation = {}
    json_citation['v69'] = [{u'_': u'12341234'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.isbn, None)

def test_book_volume(self):
    # NOTE(review): despite the name, this builds a journal-article
    # record (v30 + v12) — the next test builds the book record.
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    json_citation['v31'] = [{u'_': u'1'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.volume, u'1')

def test_journal_volume(self):
    # NOTE(review): name/content swapped with test_book_volume above.
    json_citation = {}
    json_citation['v18'] = [{u'_': u'It is the book title'}]
    json_citation['v12'] = [{u'_': u'It is the chapter title'}]
    json_citation['v31'] = [{u'_': u'1'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.volume, u'1')

def test_without_volume(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.volume, None)

def test_with_volume_but_not_a_journal_article_neither_a_book(self):
    json_citation = {}
    json_citation['v31'] = [{u'_': u'1'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.volume, None)

def test_journal_issue(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    json_citation['v32'] = [{u'_': u'1'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.issue, u'1')

def test_without_issue(self):
    json_citation = {}
    json_citation['v30'] = [{u'_': u'It is the journal title'}]
    json_citation['v12'] = [{u'_': u'It is the article title'}]
    citation = Citation(json_citation)
    self.assertEqual(citation.issue, None)
def test_issue_title(self):
json_citation = {}
json_citation['v33'] = [{u'_': u'It is the issue title'}]
| |
(TRUE ? low : high). lParam = PPBRANGE or NULL
# --- Progress bar (msctls_progress32) messages ------------------------------
PBM_GETPOS = (WM_USER+8)
PBM_SETBARCOLOR = (WM_USER+9) # lParam = bar color
PBM_SETBKCOLOR = CCM_SETBKCOLOR # lParam = bkColor
# --- Hotkey control: modifier flags, allowed-combination masks, messages ----
HOTKEYF_SHIFT = 1
HOTKEYF_CONTROL = 2
HOTKEYF_ALT = 4
HOTKEYF_EXT = 128
# NOTE(review): duplicate assignment — the effective value is 8.  The 128
# value appears to mirror the _MAC branch of commctrl.h; confirm before
# removing either line.
HOTKEYF_EXT = 8
HKCOMB_NONE = 1
HKCOMB_S = 2
HKCOMB_C = 4
HKCOMB_A = 8
HKCOMB_SC = 16
HKCOMB_SA = 32
HKCOMB_CA = 64
HKCOMB_SCA = 128
HKM_SETHOTKEY = (WM_USER+1)
HKM_GETHOTKEY = (WM_USER+2)
HKM_SETRULES = (WM_USER+3)
HOTKEY_CLASSA = "msctls_hotkey32"
HOTKEY_CLASS = HOTKEY_CLASSA  # ANSI class name is the default
# --- Common control styles (CCS_*): edge docking / sizing behaviour ---------
CCS_TOP = 0x00000001
CCS_NOMOVEY = 0x00000002
CCS_BOTTOM = 0x00000003
CCS_NORESIZE = 0x00000004
CCS_NOPARENTALIGN = 0x00000008
CCS_ADJUSTABLE = 0x00000020
CCS_NODIVIDER = 0x00000040
CCS_VERT = 0x00000080
# Vertical variants are composed from CCS_VERT plus the horizontal flag.
CCS_LEFT = (CCS_VERT | CCS_TOP)
CCS_RIGHT = (CCS_VERT | CCS_BOTTOM)
CCS_NOMOVEX = (CCS_VERT | CCS_NOMOVEY)
# --- ListView (SysListView32) constants -------------------------------------
# Window class, styles (LVS_*), messages (LVM_*), item/state flags and
# notifications (LVN_*).  A/W message pairs follow the header's
# #ifdef UNICODE pattern: where both are assigned to the plain name, the
# SECOND (ANSI) assignment wins.
WC_LISTVIEWA = "SysListView32"
WC_LISTVIEW = WC_LISTVIEWA
LVS_ICON = 0
LVS_REPORT = 1
LVS_SMALLICON = 2
LVS_LIST = 3
LVS_TYPEMASK = 3
LVS_SINGLESEL = 4
LVS_SHOWSELALWAYS = 8
LVS_SORTASCENDING = 16
LVS_SORTDESCENDING = 32
LVS_SHAREIMAGELISTS = 64
LVS_NOLABELWRAP = 128
LVS_AUTOARRANGE = 256
LVS_EDITLABELS = 512
LVS_OWNERDATA = 4096
LVS_NOSCROLL = 8192
LVS_TYPESTYLEMASK = 64512
LVS_ALIGNTOP = 0
LVS_ALIGNLEFT = 2048
LVS_ALIGNMASK = 3072
LVS_OWNERDRAWFIXED = 1024
LVS_NOCOLUMNHEADER = 16384
LVS_NOSORTHEADER = 32768
LVM_SETUNICODEFORMAT = CCM_SETUNICODEFORMAT
LVM_GETUNICODEFORMAT = CCM_GETUNICODEFORMAT
LVM_GETBKCOLOR = (LVM_FIRST + 0)
LVM_SETBKCOLOR = (LVM_FIRST + 1)
LVM_GETIMAGELIST = (LVM_FIRST + 2)
LVSIL_NORMAL = 0
LVSIL_SMALL = 1
LVSIL_STATE = 2
LVM_SETIMAGELIST = (LVM_FIRST + 3)
LVM_GETITEMCOUNT = (LVM_FIRST + 4)
LVIF_TEXT = 1
LVIF_IMAGE = 2
LVIF_PARAM = 4
LVIF_STATE = 8
LVIF_INDENT = 16
LVIF_NORECOMPUTE = 2048
LVIS_FOCUSED = 1
LVIS_SELECTED = 2
LVIS_CUT = 4
LVIS_DROPHILITED = 8
LVIS_ACTIVATING = 32
LVIS_OVERLAYMASK = 3840
LVIS_STATEIMAGEMASK = 61440
I_INDENTCALLBACK = (-1)
LPSTR_TEXTCALLBACKA = -1
LPSTR_TEXTCALLBACK = LPSTR_TEXTCALLBACKA
I_IMAGECALLBACK = (-1)
LVM_GETITEMA = (LVM_FIRST + 5)
LVM_GETITEMW = (LVM_FIRST + 75)
LVM_GETITEM = LVM_GETITEMW
LVM_GETITEM = LVM_GETITEMA  # ANSI default wins
LVM_SETITEMA = (LVM_FIRST + 6)
LVM_SETITEMW = (LVM_FIRST + 76)
LVM_SETITEM = LVM_SETITEMW
LVM_SETITEM = LVM_SETITEMA  # ANSI default wins
LVM_INSERTITEMA = (LVM_FIRST + 7)
LVM_INSERTITEMW = (LVM_FIRST + 77)
LVM_INSERTITEM = LVM_INSERTITEMA
LVM_DELETEITEM = (LVM_FIRST + 8)
LVM_DELETEALLITEMS = (LVM_FIRST + 9)
LVM_GETCALLBACKMASK = (LVM_FIRST + 10)
LVM_SETCALLBACKMASK = (LVM_FIRST + 11)
LVNI_ALL = 0
LVNI_FOCUSED = 1
LVNI_SELECTED = 2
LVNI_CUT = 4
LVNI_DROPHILITED = 8
LVNI_ABOVE = 256
LVNI_BELOW = 512
LVNI_TOLEFT = 1024
LVNI_TORIGHT = 2048
LVM_GETNEXTITEM = (LVM_FIRST + 12)
LVFI_PARAM = 1
LVFI_STRING = 2
LVFI_PARTIAL = 8
LVFI_WRAP = 32
LVFI_NEARESTXY = 64
LVM_FINDITEMA = (LVM_FIRST + 13)
LVM_FINDITEMW = (LVM_FIRST + 83)
LVM_FINDITEM = LVM_FINDITEMA
LVIR_BOUNDS = 0
LVIR_ICON = 1
LVIR_LABEL = 2
LVIR_SELECTBOUNDS = 3
LVM_GETITEMRECT = (LVM_FIRST + 14)
LVM_SETITEMPOSITION = (LVM_FIRST + 15)
LVM_GETITEMPOSITION = (LVM_FIRST + 16)
LVM_GETSTRINGWIDTHA = (LVM_FIRST + 17)
LVM_GETSTRINGWIDTHW = (LVM_FIRST + 87)
LVM_GETSTRINGWIDTH = LVM_GETSTRINGWIDTHA
# Hit-test flags (LVHT_*): note LVHT_ABOVE intentionally shares the value 8
# with LVHT_ONITEMSTATEICON, matching the C header.
LVHT_NOWHERE = 1
LVHT_ONITEMICON = 2
LVHT_ONITEMLABEL = 4
LVHT_ONITEMSTATEICON = 8
LVHT_ONITEM = (LVHT_ONITEMICON | LVHT_ONITEMLABEL | LVHT_ONITEMSTATEICON)
LVHT_ABOVE = 8
LVHT_BELOW = 16
LVHT_TORIGHT = 32
LVHT_TOLEFT = 64
LVM_HITTEST = (LVM_FIRST + 18)
LVM_ENSUREVISIBLE = (LVM_FIRST + 19)
LVM_SCROLL = (LVM_FIRST + 20)
LVM_REDRAWITEMS = (LVM_FIRST + 21)
LVA_DEFAULT = 0
LVA_ALIGNLEFT = 1
LVA_ALIGNTOP = 2
LVA_SNAPTOGRID = 5
LVM_ARRANGE = (LVM_FIRST + 22)
LVM_EDITLABELA = (LVM_FIRST + 23)
LVM_EDITLABELW = (LVM_FIRST + 118)
LVM_EDITLABEL = LVM_EDITLABELW
LVM_EDITLABEL = LVM_EDITLABELA  # ANSI default wins
LVM_GETEDITCONTROL = (LVM_FIRST + 24)
LVCF_FMT = 1
LVCF_WIDTH = 2
LVCF_TEXT = 4
LVCF_SUBITEM = 8
LVCF_IMAGE = 16
LVCF_ORDER = 32
LVCFMT_LEFT = 0
LVCFMT_RIGHT = 1
LVCFMT_CENTER = 2
LVCFMT_JUSTIFYMASK = 3
LVCFMT_IMAGE = 2048
LVCFMT_BITMAP_ON_RIGHT = 4096
LVCFMT_COL_HAS_IMAGES = 32768
LVM_GETCOLUMNA = (LVM_FIRST + 25)
LVM_GETCOLUMNW = (LVM_FIRST + 95)
LVM_GETCOLUMN = LVM_GETCOLUMNA
LVM_SETCOLUMNA = (LVM_FIRST + 26)
LVM_SETCOLUMNW = (LVM_FIRST + 96)
LVM_SETCOLUMN = LVM_SETCOLUMNA
LVM_INSERTCOLUMNA = (LVM_FIRST + 27)
LVM_INSERTCOLUMNW = (LVM_FIRST + 97)
LVM_INSERTCOLUMN = LVM_INSERTCOLUMNA
LVM_DELETECOLUMN = (LVM_FIRST + 28)
LVM_GETCOLUMNWIDTH = (LVM_FIRST + 29)
LVSCW_AUTOSIZE = -1
LVSCW_AUTOSIZE_USEHEADER = -2
LVM_SETCOLUMNWIDTH = (LVM_FIRST + 30)
LVM_GETHEADER = (LVM_FIRST + 31)
LVM_CREATEDRAGIMAGE = (LVM_FIRST + 33)
LVM_GETVIEWRECT = (LVM_FIRST + 34)
LVM_GETTEXTCOLOR = (LVM_FIRST + 35)
LVM_SETTEXTCOLOR = (LVM_FIRST + 36)
LVM_GETTEXTBKCOLOR = (LVM_FIRST + 37)
LVM_SETTEXTBKCOLOR = (LVM_FIRST + 38)
LVM_GETTOPINDEX = (LVM_FIRST + 39)
LVM_GETCOUNTPERPAGE = (LVM_FIRST + 40)
LVM_GETORIGIN = (LVM_FIRST + 41)
LVM_UPDATE = (LVM_FIRST + 42)
LVM_SETITEMSTATE = (LVM_FIRST + 43)
LVM_GETITEMSTATE = (LVM_FIRST + 44)
LVM_GETITEMTEXTA = (LVM_FIRST + 45)
LVM_GETITEMTEXTW = (LVM_FIRST + 115)
LVM_GETITEMTEXT = LVM_GETITEMTEXTW
LVM_GETITEMTEXT = LVM_GETITEMTEXTA  # ANSI default wins
LVM_SETITEMTEXTA = (LVM_FIRST + 46)
LVM_SETITEMTEXTW = (LVM_FIRST + 116)
LVM_SETITEMTEXT = LVM_SETITEMTEXTW
LVM_SETITEMTEXT = LVM_SETITEMTEXTA  # ANSI default wins
LVSICF_NOINVALIDATEALL = 1
LVSICF_NOSCROLL = 2
LVM_SETITEMCOUNT = (LVM_FIRST + 47)
LVM_SORTITEMS = (LVM_FIRST + 48)
LVM_SETITEMPOSITION32 = (LVM_FIRST + 49)
LVM_GETSELECTEDCOUNT = (LVM_FIRST + 50)
LVM_GETITEMSPACING = (LVM_FIRST + 51)
LVM_GETISEARCHSTRINGA = (LVM_FIRST + 52)
LVM_GETISEARCHSTRINGW = (LVM_FIRST + 117)
LVM_GETISEARCHSTRING = LVM_GETISEARCHSTRINGA
LVM_SETICONSPACING = (LVM_FIRST + 53)
LVM_SETEXTENDEDLISTVIEWSTYLE = (LVM_FIRST + 54) # optional wParam == mask
LVM_GETEXTENDEDLISTVIEWSTYLE = (LVM_FIRST + 55)
LVS_EX_GRIDLINES = 1
LVS_EX_SUBITEMIMAGES = 2
LVS_EX_CHECKBOXES = 4
LVS_EX_TRACKSELECT = 8
LVS_EX_HEADERDRAGDROP = 16
LVS_EX_FULLROWSELECT = 32 # applies to report mode only
LVS_EX_ONECLICKACTIVATE = 64
LVS_EX_TWOCLICKACTIVATE = 128
LVS_EX_FLATSB = 256
LVS_EX_REGIONAL = 512
LVS_EX_INFOTIP = 1024 # listview does InfoTips for you
LVS_EX_UNDERLINEHOT = 2048
LVS_EX_UNDERLINECOLD = 4096
LVS_EX_MULTIWORKAREAS = 8192
LVM_GETSUBITEMRECT = (LVM_FIRST + 56)
LVM_SUBITEMHITTEST = (LVM_FIRST + 57)
LVM_SETCOLUMNORDERARRAY = (LVM_FIRST + 58)
LVM_GETCOLUMNORDERARRAY = (LVM_FIRST + 59)
LVM_SETHOTITEM = (LVM_FIRST + 60)
LVM_GETHOTITEM = (LVM_FIRST + 61)
LVM_SETHOTCURSOR = (LVM_FIRST + 62)
LVM_GETHOTCURSOR = (LVM_FIRST + 63)
LVM_APPROXIMATEVIEWRECT = (LVM_FIRST + 64)
LV_MAX_WORKAREAS = 16
LVM_SETWORKAREAS = (LVM_FIRST + 65)
LVM_GETWORKAREAS = (LVM_FIRST + 70)
LVM_GETNUMBEROFWORKAREAS = (LVM_FIRST + 73)
LVM_GETSELECTIONMARK = (LVM_FIRST + 66)
LVM_SETSELECTIONMARK = (LVM_FIRST + 67)
LVM_SETHOVERTIME = (LVM_FIRST + 71)
LVM_GETHOVERTIME = (LVM_FIRST + 72)
LVM_SETTOOLTIPS = (LVM_FIRST + 74)
LVM_GETTOOLTIPS = (LVM_FIRST + 78)
LVBKIF_SOURCE_NONE = 0
LVBKIF_SOURCE_HBITMAP = 1
LVBKIF_SOURCE_URL = 2
LVBKIF_SOURCE_MASK = 3
LVBKIF_STYLE_NORMAL = 0
LVBKIF_STYLE_TILE = 16
LVBKIF_STYLE_MASK = 16
LVM_SETBKIMAGEA = (LVM_FIRST + 68)
LVM_SETBKIMAGEW = (LVM_FIRST + 138)
LVM_GETBKIMAGEA = (LVM_FIRST + 69)
LVM_GETBKIMAGEW = (LVM_FIRST + 139)
LVKF_ALT = 1
LVKF_CONTROL = 2
LVKF_SHIFT = 4
# ListView notification codes (LVN_*): offsets DOWN from LVN_FIRST.
LVN_ITEMCHANGING = (LVN_FIRST-0)
LVN_ITEMCHANGED = (LVN_FIRST-1)
LVN_INSERTITEM = (LVN_FIRST-2)
LVN_DELETEITEM = (LVN_FIRST-3)
LVN_DELETEALLITEMS = (LVN_FIRST-4)
LVN_BEGINLABELEDITA = (LVN_FIRST-5)
LVN_BEGINLABELEDITW = (LVN_FIRST-75)
LVN_ENDLABELEDITA = (LVN_FIRST-6)
LVN_ENDLABELEDITW = (LVN_FIRST-76)
LVN_COLUMNCLICK = (LVN_FIRST-8)
LVN_BEGINDRAG = (LVN_FIRST-9)
LVN_BEGINRDRAG = (LVN_FIRST-11)
LVN_ODCACHEHINT = (LVN_FIRST-13)
LVN_ODFINDITEMA = (LVN_FIRST-52)
LVN_ODFINDITEMW = (LVN_FIRST-79)
LVN_ITEMACTIVATE = (LVN_FIRST-14)
LVN_ODSTATECHANGED = (LVN_FIRST-15)
LVN_ODFINDITEM = LVN_ODFINDITEMA
LVN_HOTTRACK = (LVN_FIRST-21)
LVN_GETDISPINFOA = (LVN_FIRST-50)
LVN_GETDISPINFOW = (LVN_FIRST-77)
LVN_SETDISPINFOA = (LVN_FIRST-51)
LVN_SETDISPINFOW = (LVN_FIRST-78)
LVN_BEGINLABELEDIT = LVN_BEGINLABELEDITA
LVN_ENDLABELEDIT = LVN_ENDLABELEDITA
LVN_GETDISPINFO = LVN_GETDISPINFOA
LVN_SETDISPINFO = LVN_SETDISPINFOA
LVIF_DI_SETITEM = 4096
LVN_KEYDOWN = (LVN_FIRST-55)
LVN_MARQUEEBEGIN = (LVN_FIRST-56)
LVGIT_UNFOLDED = 1
LVN_GETINFOTIPA = (LVN_FIRST-57)
LVN_GETINFOTIPW = (LVN_FIRST-58)
LVN_GETINFOTIP = LVN_GETINFOTIPA
# --- TreeView (SysTreeView32) constants -------------------------------------
# Window class, styles (TVS_*), item flags, messages (TVM_*) and
# notifications (TVN_*).  As with the ListView block, A/W double
# assignments leave the ANSI value as the default.
WC_TREEVIEWA = "SysTreeView32"
WC_TREEVIEW = WC_TREEVIEWA
TVS_HASBUTTONS = 1
TVS_HASLINES = 2
TVS_LINESATROOT = 4
TVS_EDITLABELS = 8
TVS_DISABLEDRAGDROP = 16
TVS_SHOWSELALWAYS = 32
TVS_RTLREADING = 64
TVS_NOTOOLTIPS = 128
TVS_CHECKBOXES = 256
TVS_TRACKSELECT = 512
TVS_SINGLEEXPAND = 1024
TVS_INFOTIP = 2048
TVS_FULLROWSELECT = 4096
TVS_NOSCROLL = 8192
TVS_NONEVENHEIGHT = 16384
TVIF_TEXT = 1
TVIF_IMAGE = 2
TVIF_PARAM = 4
TVIF_STATE = 8
TVIF_HANDLE = 16
TVIF_SELECTEDIMAGE = 32
TVIF_CHILDREN = 64
TVIF_INTEGRAL = 128
TVIS_SELECTED = 2
TVIS_CUT = 4
TVIS_DROPHILITED = 8
TVIS_BOLD = 16
TVIS_EXPANDED = 32
TVIS_EXPANDEDONCE = 64
TVIS_EXPANDPARTIAL = 128
TVIS_OVERLAYMASK = 3840
TVIS_STATEIMAGEMASK = 61440
TVIS_USERMASK = 61440
I_CHILDRENCALLBACK = (-1)
# Special pseudo-handle insertion positions.
TVI_ROOT = -65536
TVI_FIRST = -65535
TVI_LAST = -65534
TVI_SORT = -65533
TVM_INSERTITEMA = (TV_FIRST + 0)
TVM_INSERTITEMW = (TV_FIRST + 50)
TVM_INSERTITEM = TVM_INSERTITEMW
TVM_INSERTITEM = TVM_INSERTITEMA  # ANSI default wins
TVM_DELETEITEM = (TV_FIRST + 1)
TVM_EXPAND = (TV_FIRST + 2)
TVE_COLLAPSE = 1
TVE_EXPAND = 2
TVE_TOGGLE = 3
TVE_EXPANDPARTIAL = 16384
TVE_COLLAPSERESET = 32768
TVM_GETITEMRECT = (TV_FIRST + 4)
TVM_GETCOUNT = (TV_FIRST + 5)
TVM_GETINDENT = (TV_FIRST + 6)
TVM_SETINDENT = (TV_FIRST + 7)
TVM_GETIMAGELIST = (TV_FIRST + 8)
TVSIL_NORMAL = 0
TVSIL_STATE = 2
TVM_SETIMAGELIST = (TV_FIRST + 9)
TVM_GETNEXTITEM = (TV_FIRST + 10)
TVGN_ROOT = 0
TVGN_NEXT = 1
TVGN_PREVIOUS = 2
TVGN_PARENT = 3
TVGN_CHILD = 4
TVGN_FIRSTVISIBLE = 5
TVGN_NEXTVISIBLE = 6
TVGN_PREVIOUSVISIBLE = 7
TVGN_DROPHILITE = 8
TVGN_CARET = 9
TVGN_LASTVISIBLE = 10
TVM_SELECTITEM = (TV_FIRST + 11)
TVM_GETITEMA = (TV_FIRST + 12)
TVM_GETITEMW = (TV_FIRST + 62)
TVM_GETITEM = TVM_GETITEMW
TVM_GETITEM = TVM_GETITEMA  # ANSI default wins
TVM_SETITEMA = (TV_FIRST + 13)
TVM_SETITEMW = (TV_FIRST + 63)
TVM_SETITEM = TVM_SETITEMW
TVM_SETITEM = TVM_SETITEMA  # ANSI default wins
TVM_EDITLABELA = (TV_FIRST + 14)
TVM_EDITLABELW = (TV_FIRST + 65)
TVM_EDITLABEL = TVM_EDITLABELW
TVM_EDITLABEL = TVM_EDITLABELA  # ANSI default wins
TVM_GETEDITCONTROL = (TV_FIRST + 15)
TVM_GETVISIBLECOUNT = (TV_FIRST + 16)
TVM_HITTEST = (TV_FIRST + 17)
TVHT_NOWHERE = 1
TVHT_ONITEMICON = 2
TVHT_ONITEMLABEL = 4
TVHT_ONITEMINDENT = 8
TVHT_ONITEMBUTTON = 16
TVHT_ONITEMRIGHT = 32
TVHT_ONITEMSTATEICON = 64
TVHT_ABOVE = 256
TVHT_BELOW = 512
TVHT_TORIGHT = 1024
TVHT_TOLEFT = 2048
TVHT_ONITEM = (TVHT_ONITEMICON | TVHT_ONITEMLABEL | TVHT_ONITEMSTATEICON)
TVM_CREATEDRAGIMAGE = (TV_FIRST + 18)
TVM_SORTCHILDREN = (TV_FIRST + 19)
TVM_ENSUREVISIBLE = (TV_FIRST + 20)
TVM_SORTCHILDRENCB = (TV_FIRST + 21)
TVM_ENDEDITLABELNOW = (TV_FIRST + 22)
TVM_GETISEARCHSTRINGA = (TV_FIRST + 23)
TVM_GETISEARCHSTRINGW = (TV_FIRST + 64)
TVM_GETISEARCHSTRING = TVM_GETISEARCHSTRINGA
TVM_SETTOOLTIPS = (TV_FIRST + 24)
TVM_GETTOOLTIPS = (TV_FIRST + 25)
TVM_SETINSERTMARK = (TV_FIRST + 26)
TVM_SETUNICODEFORMAT = CCM_SETUNICODEFORMAT
TVM_GETUNICODEFORMAT = CCM_GETUNICODEFORMAT
TVM_SETITEMHEIGHT = (TV_FIRST + 27)
TVM_GETITEMHEIGHT = (TV_FIRST + 28)
TVM_SETBKCOLOR = (TV_FIRST + 29)
TVM_SETTEXTCOLOR = (TV_FIRST + 30)
TVM_GETBKCOLOR = (TV_FIRST + 31)
TVM_GETTEXTCOLOR = (TV_FIRST + 32)
TVM_SETSCROLLTIME = (TV_FIRST + 33)
TVM_GETSCROLLTIME = (TV_FIRST + 34)
TVM_SETINSERTMARKCOLOR = (TV_FIRST + 37)
TVM_GETINSERTMARKCOLOR = (TV_FIRST + 38)
# TreeView notification codes (TVN_*): offsets DOWN from TVN_FIRST.
TVN_SELCHANGINGA = (TVN_FIRST-1)
TVN_SELCHANGINGW = (TVN_FIRST-50)
TVN_SELCHANGEDA = (TVN_FIRST-2)
TVN_SELCHANGEDW = (TVN_FIRST-51)
TVC_UNKNOWN = 0
TVC_BYMOUSE = 1
TVC_BYKEYBOARD = 2
TVN_GETDISPINFOA = (TVN_FIRST-3)
TVN_GETDISPINFOW = (TVN_FIRST-52)
TVN_SETDISPINFOA = (TVN_FIRST-4)
TVN_SETDISPINFOW = (TVN_FIRST-53)
TVIF_DI_SETITEM = 4096
TVN_ITEMEXPANDINGA = (TVN_FIRST-5)
TVN_ITEMEXPANDINGW = (TVN_FIRST-54)
TVN_ITEMEXPANDEDA = (TVN_FIRST-6)
TVN_ITEMEXPANDEDW = (TVN_FIRST-55)
TVN_BEGINDRAGA = (TVN_FIRST-7)
TVN_BEGINDRAGW = (TVN_FIRST-56)
TVN_BEGINRDRAGA = (TVN_FIRST-8)
TVN_BEGINRDRAGW = (TVN_FIRST-57)
TVN_DELETEITEMA = (TVN_FIRST-9)
TVN_DELETEITEMW = (TVN_FIRST-58)
TVN_BEGINLABELEDITA = (TVN_FIRST-10)
TVN_BEGINLABELEDITW = (TVN_FIRST-59)
TVN_ENDLABELEDITA = (TVN_FIRST-11)
TVN_ENDLABELEDITW = (TVN_FIRST-60)
TVN_KEYDOWN = (TVN_FIRST-12)
TVN_GETINFOTIPA = (TVN_FIRST-13)
TVN_GETINFOTIPW = (TVN_FIRST-14)
TVN_SINGLEEXPAND = (TVN_FIRST-15)
# Unsuffixed aliases resolve to the ANSI variants.
TVN_SELCHANGING = TVN_SELCHANGINGA
TVN_SELCHANGED = TVN_SELCHANGEDA
TVN_GETDISPINFO = TVN_GETDISPINFOA
TVN_SETDISPINFO = TVN_SETDISPINFOA
TVN_ITEMEXPANDING = TVN_ITEMEXPANDINGA
TVN_ITEMEXPANDED = TVN_ITEMEXPANDEDA
TVN_BEGINDRAG = TVN_BEGINDRAGA
TVN_BEGINRDRAG = TVN_BEGINRDRAGA
TVN_DELETEITEM = TVN_DELETEITEMA
TVN_BEGINLABELEDIT = TVN_BEGINLABELEDITA
TVN_ENDLABELEDIT = TVN_ENDLABELEDITA
TVN_GETINFOTIP = TVN_GETINFOTIPA
TVCDRF_NOIMAGES = 65536
# --- ComboBoxEx (ComboBoxEx32) constants ------------------------------------
WC_COMBOBOXEXA = "ComboBoxEx32"
WC_COMBOBOXEX = WC_COMBOBOXEXA
CBEIF_TEXT = 1
CBEIF_IMAGE = 2
CBEIF_SELECTEDIMAGE = 4
CBEIF_OVERLAY = 8
CBEIF_INDENT = 16
CBEIF_LPARAM = 32
CBEIF_DI_SETITEM = 268435456
CBEM_INSERTITEMA = (WM_USER + 1)
CBEM_SETIMAGELIST = (WM_USER + 2)
CBEM_GETIMAGELIST = (WM_USER + 3)
CBEM_GETITEMA = (WM_USER + 4)
CBEM_SETITEMA = (WM_USER + 5)
# Kept disabled upstream: CB_DELETESTRING is not defined in this module.
#CBEM_DELETEITEM = CB_DELETESTRING
CBEM_GETCOMBOCONTROL = (WM_USER + 6)
CBEM_GETEDITCONTROL = (WM_USER + 7)
CBEM_SETEXSTYLE = (WM_USER + 8) # use SETEXTENDEDSTYLE instead
CBEM_SETEXTENDEDSTYLE = (WM_USER + | |
self.createWindow()
self.p.sigTreeStateChanged.connect(self.change)
def closeEvent(self, event):
    # Qt close hook for the dialog; only logs (Python 2 print statement).
    print 'Load custom matix: closing'
def okButtonFunct(self):
    # OK simply dismisses the dialog: settings are applied live through
    # the parameter-tree change() callback, so nothing to commit here.
    self.close()
def cancelButtonFunct(self):
    # NOTE(review): identical to okButtonFunct — Cancel does not revert
    # settings already applied through change(); confirm that is intended.
    self.close()
def createWindow(self):
    """Build the dialog UI: a parameter tree of load settings, a help
    text browser, and OK/Cancel buttons, laid out in a grid."""
    self.layout = QtGui.QGridLayout()
    self.setLayout(self.layout)
    ### param tree definition
    # Each child's 'value' is seeded from the instance attributes set by
    # __init__ (not visible here); change() writes edits back to them.
    params = [{
        'name': 'Load custom matrix', 'type': 'group',
        'children': [
            {'name': 'Load matrix', 'type': 'action'},
            {'name': 'Dimension X', 'type': 'int',
             'value': self.matSizeX, 'step' : 1,
             'tip': "matrix X length"},
            {'name': 'Dimension Y', 'type': 'int',
             'value': self.matSizeY, 'step' : 1,
             'tip': "matrix Y length"},
            {'name': 'Order', 'type': 'str',
             'value': self.dataOrder,
             'tip': "Data order: C, read rows or Fortran, read columns"},
            {'name': 'Data type', 'type': 'str',
             'value': self.dataType,
             'tip': "Type of single matrix cell (ex. H - \
2byte unsigned intigers, I - 4byte unsigned intigers)"},
            {'name': 'Endian type', 'type': 'str',
             'value': self.dataEndian, 'tip': "Data endian type"},
            {'name': 'Skip ... first bytes',
             'type': 'int', 'value': self.skipFirstBytes, 'step' : 1,
             'tip': "Don't read first ... bytes of loaded file"},
            {'name': 'Skip ... last bytes', 'type': 'int',
             'value': self.skipLastBytes, 'step' : 1,
             'tip': "Don't read last ... bytes of loaded file"}]}]
    self.p = Parameter.create(name='params', type='group', children=params)
    self.t = ParameterTree()
    # showTop=False hides the synthetic 'params' root node.
    self.t.setParameters(self.p, showTop=False)
    self.layout.addWidget(self.t)
    self.resize(500,640)
    # Read-only help text explaining struct-style format codes.
    textbrowser = QtGui.QTextBrowser()
    textbrowser.append(
        ' Visit https://docs.python.org/2/library/struct.html for more \
detailed informations about data types and endians.\n\n\
Example: in case of .mat 4096x4096 matrix data type is H, \
because matrix cells are 2 byte unsigned intigers. \
Endian should be set to "<" (little endian).')
    textbrowser.setOpenExternalLinks(True)
    self.layout.addWidget(textbrowser)
    okButton = QtGui.QPushButton('OK', clicked = self.okButtonFunct)
    cancelButton = QtGui.QPushButton(
        'Cancel', clicked = self.cancelButtonFunct)
    self.layout.addWidget(okButton)
    self.layout.addWidget(cancelButton)
def readBinaryMatrix(self, f):
    """Decode an open binary file handle into self.matrix and show it.

    BUG FIX: the skip settings are labelled "Skip ... first/last BYTES"
    in the UI, but were previously applied by slicing the already
    decoded element array (so they skipped elements, not bytes).  They
    are now applied to the raw byte stream before decoding.  The
    pointless ``self.matrix = self.matrix[:]`` copy is gone as well.
    """
    f.seek(0)
    raw = f.read()
    # Byte-level trimming, matching the dialog's tooltips.
    if self.skipFirstBytes:
        raw = raw[self.skipFirstBytes:]
    if self.skipLastBytes:
        raw = raw[:-self.skipLastBytes]
    # e.g. '<' + 'H' -> little-endian 2-byte unsigned integers.
    dataFormat = self.dataEndian + self.dataType
    # copy() because frombuffer returns a read-only view of `raw`.
    self.matrix = np.frombuffer(raw, dtype=dataFormat).copy()
    if self.dataOrder == 'F':
        self.matrix = self.matrix.reshape(
            (self.matSizeX, self.matSizeY), order="F")
    else:
        self.matrix = self.matrix.reshape((self.matSizeX, self.matSizeY))
    # Hand the result to the main window (module-level global) to display.
    window.matrix = self.matrix
    window.showMatrix(1)
def readNonBinaryMatrix(self, f):
    # Text-matrix loading is not implemented yet; stub only.
    print 'under construction'
def change(self, param, changes):
for param, change, data in changes:
path = self.p.childPath(param)
### Load Matrix button clicked
if path[1] == 'Load matrix':
fileName = QtGui.QFileDialog.getOpenFileName(
self, "Open file","", "Any file(*)")
### checking if file is binary or text type
with open(fileName,'rb') as f:
isBinary = False
for block in f:
if '\0' in block:
isBinary = True
break
if isBinary:
self.readBinaryMatrix(f)
else:
self.readNonBinaryMatrix(f)
print 'is ' + str(fileName) + ' binary: ' + str(isBinary)
### Dimension X
elif path[1] == 'Dimension X':
self.matSizeX = int(data)
### Dimension Y
elif path[1] == 'Dimension Y':
self.matSizeY = int(data)
### Data order
elif path[1] == 'Order':
self.dataOrder = str(data)
### Data type
elif path[1] == 'Data type':
self.dataType = str(data)
### Endian type
elif path[1] == 'Endian type':
self.endianType = str(data)
### skip first bytes of file
elif path[1] == 'Skip ... first bytes':
self.skipFirstBytes = int(data)
### skip last bytes of file
elif path[1] == 'Skip ... last bytes':
self.skipLastBytes = int(data)
### Main window and functions ###
class MainWindow(QtGui.QMainWindow):
    """Main application window: matrix projection plot, gated-spectrum
    plot, ROI management and menu-driven actions."""

    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        self.refreshTime = 200 #refresh interval in ms
        self.energyCalibAxis = 0.5 #default energy calibration: 0.5keV/channel
        self.plusRoiList = [] #list of plus rois
        self.minusRoiList = [] #list of minus rois
        self.groupRoiList = [] #list of groups
        self.displaySpectra = [] #additional spectra to display
        self.programRunning = True #start/stop function needs this
        self.isShakeRemoveActive = False
        self.legendVisible = False
        # GUI must exist before the peak-find / transpose state below is
        # consumed by menu actions.
        self.setupUserInterface() #creates GUI
        self.minPeakWidth = 5 #for peak find
        self.maxPeakWidth = 25 #for peak find
        self.noisePeakWidth = 0.1 #for peak find
        self.peakFindActive = False #for peak find auto refresh
        self.peaksLabelsUpper = [] #for peak find
        self.peaksLabelsLower = [] #for peak find
        self.ifTranspose = False #start with untransposed matrix
        self.additionalFunctionsMenu() #functions not usable for most users
def setupUserInterface(self):
    """Initialise the User Interface.

    Builds two stacked plot frames (matrix projection on top, gated
    spectrum below) inside a vertical splitter, adds hidden legends, a
    three-field status bar, and finally the menus and their actions.
    """
    # upper frame
    self.upperFrame = QtGui.QFrame()
    self.upperFrameLayout = QtGui.QHBoxLayout()
    self.upperFrame.setLayout(self.upperFrameLayout)
    self.upperFrame.setLineWidth(0)
    self.upperFrame.setFrameStyle(QtGui.QFrame.Panel)
    self.upperFrameLayout.setContentsMargins(0,0,0,0)
    # upper frame contents
    self.viewUpper = GraphicsLayoutWidget()
    self.upperFrameLayout.addWidget(self.viewUpper)
    self.vbUpper = pg.PlotItem(title='Matrix projection')
    # Secondary (top) axis rescaled from channels to energy.
    self.energyAxisUpper = self.vbUpper.axes["top"]["item"]#additional axis
    self.energyAxisUpper.setScale(self.energyCalibAxis)
    self.energyAxisUpper.show()
    self.viewUpper.addItem(self.vbUpper)
    # lower frame
    self.lowerFrame = QtGui.QFrame()
    self.lowerFrameLayout = QtGui.QHBoxLayout()
    self.lowerFrame.setLayout(self.lowerFrameLayout)
    self.lowerFrame.setLineWidth(0)
    self.lowerFrame.setFrameStyle(QtGui.QFrame.Panel)
    self.lowerFrameLayout.setContentsMargins(0,0,0,0)
    # lower frame content
    self.viewLower = GraphicsLayoutWidget()
    self.lowerFrameLayout.addWidget(self.viewLower)
    self.vbLower = pg.PlotItem(title='Gated spectrum')
    self.energyAxisLower = self.vbLower.axes["top"]["item"]#additional axis
    self.energyAxisLower.setScale(self.energyCalibAxis)
    self.energyAxisLower.show()
    self.viewLower.addItem(self.vbLower)
    # UI window (containing left and right frames)
    UIwindow = QtGui.QWidget(self)
    UIwindowLayout = QtGui.QHBoxLayout()
    UIwindowSplitter = QtGui.QSplitter(QtCore.Qt.Vertical)
    UIwindowLayout.addWidget(UIwindowSplitter)
    UIwindow.setLayout(UIwindowLayout)
    self.setCentralWidget(UIwindow)
    UIwindowSplitter.addWidget(self.upperFrame)
    UIwindowSplitter.addWidget(self.lowerFrame)
    #Legend items (created hidden; toggled elsewhere via legendVisible)
    self.vbUpper.addLegend()
    self.vbUpper.legend.anchor((1,0), (1,0))
    self.vbUpper.legend.hide()
    self.vbLower.addLegend()
    self.vbLower.legend.anchor((1,0), (1,0))
    self.vbLower.legend.hide()
    # Status bar
    self.windowStatusBar = QtGui.QStatusBar()
    self.currentNameStatus = QtGui.QLabel(' ') #matrix name and path
    self.transposeStatus = QtGui.QLabel(' ') #is transposed?
    self.moveToRemoveStatus = QtGui.QLabel(' ') #is move to remove ON?
    self.windowStatusBar.insertPermanentWidget(
        0, self.currentNameStatus, 2)
    self.windowStatusBar.insertPermanentWidget(
        1, self.transposeStatus, 2)
    self.windowStatusBar.insertPermanentWidget(
        2, self.moveToRemoveStatus, 2)
    self.setStatusBar(self.windowStatusBar)
    # Application window
    self.windowTitle = 'MakeMyGate v2.0'
    self.setWindowTitle(self.windowTitle)
    self.resize(1300,600)
    # Window menus (createActions relies on menus existing first)
    self.createMenus()
    self.createActions()
def createMenus(self):
    """Create the menu-bar entries; actions are attached later in
    createActions()."""
    bar = self.menuBar()
    menu_specs = (
        ('fileMenu', 'File'),
        ('roiMenu', 'ROI'),
        ('optionsMenu', 'Options'),
        ('spectrumMenu', 'Spectrums'),
        ('utilitiesMenu', 'Fitting'),
        ('aboutMenu', 'About'),
    )
    for attr, title in menu_specs:
        setattr(self, attr, bar.addMenu(title))
def createActions(self):
# file Menu
self.exitAct = QtGui.QAction(
"Quit", self, shortcut="Ctrl+Q",
statusTip="Exit the application")
self.loadMatrix = QtGui.QAction("Load matrix", self, shortcut="Ctrl+L")
self.saveSpe = QtGui.QAction("Save SPE", self, shortcut="Ctrl+S")
self.saveRoiListToFile = QtGui.QAction("Save ROIs to file", self)
self.loadRoiList = QtGui.QAction("Load ROIs from file", self)
self.loadCustomMatrix = QtGui.QAction("Load custom matrix", self)
fileMenuActions = [
self.loadMatrix, self.exitAct, self.saveSpe,
self.saveRoiListToFile, self.loadRoiList,
self.loadCustomMatrix]
fileMenuActFuncs = [
self.loadMatrixFunct, self.close, self.saveSpeFunct,
self.saveRoiListToFileFunct, self.loadRoiListFunct,
self.loadCustomMatrixFunct]
for i in xrange(len(fileMenuActions)):
action = fileMenuActions[i]
function = fileMenuActFuncs[i]
action.triggered[()].connect(function)
self.fileMenu.addAction(self.loadMatrix)
self.fileMenu.addAction(self.loadCustomMatrix)
self.fileMenu.addAction(self.saveSpe)
self.fileMenu.addAction(self.saveRoiListToFile)
self.fileMenu.addAction(self.loadRoiList)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
# ROI menu
self.addRoiPlus = QtGui.QAction("Add ROI+", self, shortcut="Ctrl++")
self.addRoiMinus = QtGui.QAction("Add ROI-", self, shortcut="Ctrl+-")
self.removeRoiPlus = QtGui.QAction(
"Remove last ROI+", self, shortcut="Ctrl+Shift++")
self.removeRoiMinus = QtGui.QAction(
"Remove last ROI-", self, shortcut="Ctrl+Shift+-")
self.removeAllPlusRois = QtGui.QAction("Remove every ROI+", self)
self.removeAllMinusRois = QtGui.QAction("Remove every ROI-", self)
self.removeAllRois = QtGui.QAction("Remove every ROI", self)
self.addGroupRoi = QtGui.QAction("Add group", self, shortcut="Ctrl+*")
self.removeGroupRoi = QtGui.QAction(
"Remove last group", self, shortcut="Ctrl+Shift+*")
self.removeAllGroupRoi = QtGui.QAction("Remove every group", self)
self.shakeRoiRemove = QtGui.QAction("Move ROI to remove it: OFF", self)
roiMenuActions = [
self.addRoiPlus, self.addRoiMinus, self.removeRoiPlus,
self.removeRoiMinus, self.removeAllPlusRois,
self.removeAllMinusRois, self.removeAllRois, self.addGroupRoi,
self.removeGroupRoi, self.removeAllGroupRoi, self.shakeRoiRemove]
roiMenuActFuncs = [
self.addRoiPlusFunct, self.addRoiMinusFunct,
self.removeRoiPlusFunct,
self.removeRoiMinusFunct, self.removeAllPlusRoisFunct,
self.removeAllMinusRoisFunct, self.removeAllRoisFunct,
self.addGroupRoiFunct, self.removeGroupRoiFunct,
self.removeAllGroupRoiFunct, self.shakeRoiRemoveFunct]
for i in xrange(len(roiMenuActions)):
action = roiMenuActions[i]
function = roiMenuActFuncs[i]
action.triggered[()].connect(function)
self.roiMenu.addAction(self.addRoiPlus)
self.roiMenu.addAction(self.addRoiMinus)
self.roiMenu.addAction(self.removeRoiPlus)
self.roiMenu.addAction(self.removeRoiMinus)
self.roiMenu.addAction(self.addGroupRoi)
self.roiMenu.addAction(self.removeGroupRoi)
self.roiMenu.addSeparator()
self.roiMenu.addAction(self.removeAllPlusRois)
self.roiMenu.addAction(self.removeAllMinusRois)
self.roiMenu.addAction(self.removeAllGroupRoi)
self.roiMenu.addAction(self.removeAllRois)
self.roiMenu.addSeparator()
self.roiMenu.addAction(self.shakeRoiRemove)
## Options menu
self.setRefreshInterval = QtGui.QAction(
"Set refresh interval", self)
self.startStopRefresh = QtGui.QAction(
"Start/Stop refreshing", self, shortcut="Ctrl+X")
self.setCalibration = QtGui.QAction("Set energy calibration", self)
self.peakFind = QtGui.QAction("Peak find", self, shortcut="Ctrl+P")
self.peakFindParams = QtGui.QAction(
"Adjust peak find parameters", self)
self.transposeMatrix = QtGui.QAction(
"Transpose Matrix", self, shortcut="Ctrl+T")
self.displayLegend = QtGui.QAction("Display legend", self)
optionsMenuActions = [
self.setRefreshInterval, self.startStopRefresh,
self.setCalibration, self.peakFind, self.peakFindParams,
self.transposeMatrix, self.displayLegend]
optionsMenuFuncs = [
self.setRefreshIntervalFunct, self.startStopRefreshFunct,
self.setCalibrationFunct, self.peakFindFunct,
self.peakFindParamsFunct,
self.transposeMatrixFunct, self.displayLegendFunct]
for i in xrange(len(optionsMenuActions)):
action = optionsMenuActions[i]
function = optionsMenuFuncs[i]
action.triggered[()].connect(function)
self.optionsMenu.addAction(self.setRefreshInterval)
self.optionsMenu.addAction(self.startStopRefresh)
self.optionsMenu.addAction(self.displayLegend)
self.optionsMenu.addAction(self.setCalibration)
self.optionsMenu.addAction(self.transposeMatrix)
self.optionsMenu.addSeparator()
self.optionsMenu.addAction(self.peakFind)
self.optionsMenu.addAction(self.peakFindParams)
# Additional spectrums menu
self.addSpectrum = QtGui.QAction("Display additional spectrum", self)
self.removeSpectra = QtGui.QAction("Remove all spectra", self)
spectrumMenuActions = [self.addSpectrum, self.removeSpectra]
spectrumMenuFuncs = [self.addSpectrumFunct, self.removeSpectrumFunct]
for i in xrange(len(spectrumMenuActions)):
action = spectrumMenuActions[i]
function = spectrumMenuFuncs[i]
action.triggered[()].connect(function)
self.spectrumMenu.addAction(self.addSpectrum)
self.spectrumMenu.addSeparator()
self.spectrumMenu.addAction(self.removeSpectra)
self.spectrumMenu.addSeparator()
# Utilities menu
self.utiRoiAdd = QtGui.QAction("Add Fit ROI", self, shortcut="F2")
self.utiRoiRemove = QtGui.QAction("Remove Fit ROI", self)
self.fitPeak = QtGui.QAction("Fit peak in ROI", self, shortcut="F4")
self.fitNextPeak = QtGui.QAction("Fit 2nd peak in ROI", self)
self.areaUnderPeak = QtGui.QAction("Calculate area", self)
self.bgRoi = QtGui.QAction("Add background ROI", self, shortcut="F3")
self.bgRoiRemove = QtGui.QAction("Remove background ROI", self)
self.pasternakShap = QtGui.QAction("Paternak Shape", self)
self.pasternakSingls = QtGui.QAction("Paternak Singlsh", self)
utilitiesMenuActions = [
self.utiRoiAdd, self.utiRoiRemove,
self.fitPeak, self.fitNextPeak,
self.areaUnderPeak, self.bgRoi,
self.bgRoiRemove, self.pasternakShap,
self.pasternakSingls]
utilitiesMenuFuncs = [
self.utiRoiFunct, self.utiRoiRemoveFunct,
self.fitPeakFunct, self.fitNextPeakFunct,
self.areaUnderPeakFunct, self.bgRoiFunct,
self.bgRoiRemoveFunct, self.pasternakShape,
self.pasternakSinglsh]
for i in xrange(len(utilitiesMenuActions)):
action = utilitiesMenuActions[i]
function = utilitiesMenuFuncs[i]
action.triggered[()].connect(function)
self.utilitiesMenu.addAction(self.utiRoiAdd)
self.utilitiesMenu.addAction(self.utiRoiRemove)
self.utilitiesMenu.addAction(self.bgRoi)
self.utilitiesMenu.addAction(self.bgRoiRemove)
self.utilitiesMenu.addSeparator()
self.utilitiesMenu.addAction(self.fitPeak)
self.utilitiesMenu.addAction(self.fitNextPeak)
self.utilitiesMenu.addAction(self.areaUnderPeak)
# self.utilitiesMenu.addAction(self.pasternakShap)
# self.utilitiesMenu.addAction(self.pasternakSingls)
# About page
| |
from factoratio.fuel import Burner, Fuel
from factoratio.item import Ingredient, Recipe
from factoratio.util import Joule, Watt
class Module():
    """A module for a Producer.

    Modifies various aspects of the Producer's operation including energy
    consumption, operation speed, productivity bonus, and pollution output.

    All modifiers are a positive or negative percentage expressed as a
    real number, e.g. +30% -> 0.30, -15% -> -0.15.

    Attributes
    ----------
    name : str
        The name of the module. Can be anything, but is typically the in-game
        name.
    tier : int
        A positive, non-zero integer representing the module tier. Plain old
        data not used in any calculations.
    energy: float
        The energy multiplier for this module. Affects energy consumption rate.
    speed: float
        The speed multiplier for this module. Affects production speed.
    productivity: float
        The productivity multiplier for this module. Currently only ever
        positive in Factorio. Determines amount of extra free products.
    pollution: float
        The pollution multiplier for this module. Affects amount of pollution
        produced.
    """

    def __init__(self, name: str, tier: int, energy: float, speed: float,
                 productivity: float, pollution: float):
        self.name = name
        self.tier = tier
        self.energy = energy
        self.speed = speed
        self.productivity = productivity
        self.pollution = pollution

    def __repr__(self):
        # Fix: use self.__class__ rather than the implicit __class__ cell so
        # subclasses repr with their own name; also matches Producer.__repr__.
        return (f'{self.__class__.__name__}({self.name!r}, {self.tier!r}, '
                f'{self.energy!r}, {self.speed!r}, {self.productivity!r}, '
                f'{self.pollution!r})')

    def __str__(self):
        return f'Tier {self.tier} {self.name} Module'
class Producer():
    """Base class for entities that produce an Item as output.

    Attributes
    ----------
    name: str
        The name of this Producer. Can be anything, but is typically the
        in-game name.
    craftSpeed: float
        The speed at which this Producer crafts a given Recipe; the Recipe
        crafting time is divided by this speed to yield the total crafting
        time.
    maxSlots: int
        The total number of module slots this Producer supports.
    energyUsage: factoratio.util.Watt
        The amount of energy consumed by this Producer per second while
        actively working, i.e. for the duration of a crafting cycle.
    drain: factoratio.util.Watt
        The amount of energy constantly consumed by this Producer, just by
        being connected to the power grid.
    pollution: float
        The amount of pollution produced per minute while operating.
    """

    def __init__(self, name: str, craftSpeed: float, maxSlots: int,
                 energyUsage: Watt, drain: Watt, pollution: float):
        self.name = name
        self.craftSpeed = craftSpeed
        self.maxSlots = maxSlots
        self.modules = [None] * self.maxSlots  # one entry per (empty) slot
        self.energyUsage = energyUsage
        self.drain = drain
        self.pollution = pollution

    def __repr__(self):
        return (f'{self.__class__.__name__}({self.name!r}, {self.craftSpeed!r}, '
                f'{self.maxSlots!r}, {self.energyUsage!r}, {self.drain!r}, '
                f'{self.pollution!r})')

    def __str__(self):
        return self.name

    def _getMultiplier(self, category: str) -> float:
        """Return 1.0 plus the summed *category* effect of installed modules."""
        total = 1.0
        for slot in self.modules:
            if isinstance(slot, Module):
                total += getattr(slot, category)
        # XXX: rounding hides float artifacts such as 1.1 + 0.1
        return round(total, 6)

    def speedMultiplier(self) -> float:
        """Return the Producer's crafting speed multiplier."""
        return self._getMultiplier('speed')

    def energyMultiplier(self) -> float:
        """Return the Producer's energy usage multiplier."""
        return self._getMultiplier('energy')

    def productivityMultiplier(self) -> float:
        """Return the Producer's added productivity multiplier."""
        return self._getMultiplier('productivity')

    def pollutionMultiplier(self) -> float:
        """Return the Producer's pollution multiplier."""
        return self._getMultiplier('pollution')

    def effectivePollutionMultiplier(self) -> float:
        """Return the pollution multiplier scaled by the energy multiplier."""
        return self.pollutionMultiplier() * self.energyMultiplier()

    def craft(self, recipe: Recipe) -> dict:
        """Craft *recipe* once with the Producer's current stats.

        Returns a dict holding the craft duration, the Recipe output, the
        energy consumed, and the pollution produced.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to craft.
        """
        duration = recipe.time / (self.craftSpeed * self.speedMultiplier())
        eMult = self.energyMultiplier()
        spent = Joule(
            (self.drain + self.energyUsage * eMult).value) * duration
        # The pollution stat is per *minute*, hence the division by 60.
        emitted = (self.pollution * self.pollutionMultiplier() *
                   eMult * (duration / 60))
        return {'duration': duration, 'output': recipe.output,
                'energy': spent, 'pollution': emitted}

    # TODO: For this and all subclasses, have itemName default to None and
    # pick the first output; the rate functions don't care WHICH output.
    def productionRate(self, recipe: Recipe, itemName: str,
                       count: int=1) -> float:
        """Return the rate that an Item is produced, in items per second.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to examine.
        itemName: str
            The specific Recipe product to obtain the production rate for.
        count: int, optional
            The number of identical Producers concurrently crafting this
            Recipe; acts as a multiplier. Defaults to one.
        """
        product = recipe.getOutputByName(itemName)
        perAllCrafts = count * product.count * self.productivityMultiplier()
        return perAllCrafts / self.craft(recipe)['duration']

    def productionRateInverse(self, recipe: Recipe, itemName: str,
                              ips: float=1.0) -> float:
        """Return the number of these Producers needed to reach the given rate.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to examine.
        itemName: str
            The specific Recipe product being produced.
        ips: float, optional
            The target production rate to meet. Defaults to one item per
            second.
        """
        product = recipe.getOutputByName(itemName)
        return (ips * self.craft(recipe)['duration'] /
                (product.count * self.productivityMultiplier()))

    def consumptionRate(self, recipe: Recipe, itemName: str,
                        count: int=1) -> float:
        """Return the rate that an Item is consumed, in items per second.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to examine.
        itemName: str
            The specific Recipe ingredient to obtain the consumption rate for.
        count: int, optional
            The number of identical Producers concurrently crafting this
            Recipe; acts as a multiplier. Defaults to one.
        """
        needed = recipe.getInputByName(itemName)
        return count * needed.count / self.craft(recipe)['duration']

    def consumptionRateInverse(self, recipe: Recipe, itemName: str,
                               ips: float=1.0) -> float:
        """Return the number of these Producers needed to reach the given rate.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to examine.
        itemName: str
            The specific Recipe ingredient being consumed.
        ips: float, optional
            The target consumption rate to meet. Defaults to one item per
            second.
        """
        needed = recipe.getInputByName(itemName)
        return ips * self.craft(recipe)['duration'] / needed.count

    def rates(self, recipe: Recipe, count: int=1) -> dict:
        """Calculate all rates for this Producer, in units per second.

        The report covers energy consumption, pollution generated, each item
        consumed and produced, and the number of Producers involved.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to base the rates on.
        count: int, optional
            The number of identical Producers concurrently crafting this
            Recipe; acts as a multiplier. Defaults to one.
        """
        result = self.craft(recipe)
        duration = result['duration']
        consumed = [(ing, self.consumptionRate(recipe, ing.what.name, count))
                    for ing in recipe.input]
        produced = [(ing, self.productionRate(recipe, ing.what.name, count))
                    for ing in recipe.output]
        return {
            'producers': count,
            'consumed': consumed,
            'produced': produced,
            'energy': Watt(result['energy'].value) / duration * count,
            'pollution': result['pollution'] / duration * count
        }
class BurnerProducer(Producer, Burner):
    """Class representing a burner producer.

    A burner producer is simply a Producer that is powered by burning a Fuel.
    """

    def rates(self, recipe: Recipe, fuel: Fuel, count: int=1) -> dict:
        """Calculate all rates for this Producer.

        Extends Producer.rates with the amount of Fuel burned.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to base the rates on.
        fuel: factoratio.fuel.Fuel
            The fuel being burned.
        count: int, optional
            The number of identical Producers concurrently crafting this
            Recipe; acts as a multiplier. Defaults to one.
        """
        report = super().rates(recipe, count)
        report['fuel'] = report['energy'].value / fuel.energy.value
        return report

    def productsPerFuel(self, recipe: Recipe, itemName: str, fuel: Fuel,
                        count: int=1) -> float:
        """The number of Items produced per unit of Fuel burned.

        Parameters
        ----------
        recipe: Recipe
            The Recipe to examine.
        itemName: str
            The specific Recipe ingredient being produced.
        fuel: factoratio.fuel.Fuel
            The Fuel being burned.
        count: int, optional
            The number of Producers running concurrently. Defaults to one.
        """
        # NOTE(review): burn time is *divided* by the production rate here;
        # if burnTime() returns seconds per fuel unit, items-per-fuel would
        # normally be burnTime * rate — confirm burnTime's semantics.
        secondsOfBurn = fuel.burnTime(self.energyUsage)
        itemsPerSecond = self.productionRate(recipe, itemName, count)
        return secondsOfBurn / itemsPerSecond
class MiningDrill(Producer):
"""A class representing a mining drill.
A mining drill is a Producer that consumes energy to extract a resource
from the ground it is placed on.
Most methods inherited from Producer have had their behavior slightly
modified to suit the more basic nature of vanilla Factorio mining recipes.
See also: Recipe.miningRecipe.
"""
def productionRate(self, recipe: Recipe, itemName: str=None,
count: int=1) -> float:
"""Return the rate that an Item is produced, in items per second.
Typically Recipe.miningRecipe is used to construct the Recipe object, but
this is not a hard requirement.
Parameters
----------
recipe: Recipe
The Recipe to examine.
itemName: str, optional
The | |
thrift_spec = (
)
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: use the accelerated C codec when the protocol, transport,
    # and compiled fastbinary module all support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP; this struct declares no fields,
    # so everything received is skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: use the accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Generic path: an empty struct is just begin / stop / end.
    oprot.writeStructBegin('describe_cluster_name_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to verify.
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only (this is thrift-generated py2 code).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Defined explicitly; Python 2 does not derive __ne__ from __eq__.
    return not (self == other)
class describe_cluster_name_result(object):
  """
  Thrift-generated result struct for describe_cluster_name().

  Attributes:
   - success: the value returned by the server (field id 0)
  """

  # (field id, type, name, type args, default); index 0 is the return value.
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    # Fast path: accelerated C codec when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('describe_cluster_name_result')
    # Unset (None) fields are omitted from the wire.
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to verify.
    return

  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class describe_version_args(object):
  """Thrift-generated argument struct for describe_version(); no fields."""

  thrift_spec = (
  )

  def read(self, iprot):
    # Fast path: accelerated C codec when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: no declared fields, so skip anything received.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('describe_version_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to verify.
    return

  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class describe_version_result(object):
  """
  Thrift-generated result struct for describe_version().

  Attributes:
   - success: the value returned by the server (field id 0)
  """

  # (field id, type, name, type args, default); index 0 is the return value.
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    # Fast path: accelerated C codec when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('describe_version_result')
    # Unset (None) fields are omitted from the wire.
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to verify.
    return

  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class describe_ring_args(object):
  """
  Thrift-generated argument struct for describe_ring().

  Attributes:
   - keyspace: required keyspace name (field id 1)
  """

  # (field id, type, name, type args, default); slot 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'keyspace', None, None, ), # 1
  )

  def __init__(self, keyspace=None,):
    self.keyspace = keyspace

  def read(self, iprot):
    # Fast path: accelerated C codec when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.keyspace = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('describe_ring_args')
    if self.keyspace is not None:
      oprot.writeFieldBegin('keyspace', TType.STRING, 1)
      oprot.writeString(self.keyspace)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # keyspace is a required field in the IDL.
    if self.keyspace is None:
      raise TProtocol.TProtocolException(message='Required field keyspace is unset!')
    return

  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class describe_ring_result(object):
  """
  Thrift-generated result struct for describe_ring().

  Attributes:
   - success: list of TokenRange returned on success (field id 0)
   - ire: InvalidRequestException raised by the server (field id 1)
  """

  # (field id, type, name, type args, default).
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(TokenRange, TokenRange.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, ire=None,):
    self.success = success
    self.ire = ire

  def read(self, iprot):
    # Fast path: accelerated C codec when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Decode a list of TokenRange structs element by element.
          self.success = []
          (_etype258, _size255) = iprot.readListBegin()
          for _i259 in xrange(_size255):
            _elem260 = TokenRange()
            _elem260.read(iprot)
            self.success.append(_elem260)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.ire = InvalidRequestException()
          self.ire.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('describe_ring_result')
    # Unset (None) fields are omitted from the wire.
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter261 in self.success:
        iter261.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ire is not None:
      oprot.writeFieldBegin('ire', TType.STRUCT, 1)
      self.ire.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to verify.
    return

  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class describe_partitioner_args(object):
  """Thrift-generated argument struct for describe_partitioner(); no fields."""

  thrift_spec = (
  )

  def read(self, iprot):
    # Fast path: accelerated C codec when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: no declared fields, so skip anything received.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('describe_partitioner_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to verify.
    return

  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class describe_partitioner_result(object):
  """
  Thrift-generated result struct for describe_partitioner().

  Attributes:
   - success: the value returned by the server (field id 0)
  """

  # (field id, type, name, type args, default); index 0 is the return value.
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    # Fast path: accelerated C codec when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: accelerated C codec when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('describe_partitioner_result')
    # Unset (None) fields are omitted from the wire.
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to verify.
    return

  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class describe_snitch_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_snitch_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self | |
# <gh_stars>0
import sys
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import Levenshtein
import time
from tqdm import tqdm
import argparse
import yaml
from . import lineageGroup_utils as lg_utils
#import .lineageGroup_utils as lg_utils
# suppress warnings for mapping that takes place in some filter functions
pd.options.mode.chained_assignment = None
def create_output_dir(outputdir = None):
    """
    Create an output directory to store important logging information,
    as well as important figures for QC.

    If no name is given, the first unused "outputN" directory is chosen.

    :param outputdir: desired output directory path, or None to auto-pick
    :return: path of the (possibly newly created) output directory
    """
    if outputdir is None:
        # Auto-pick the first "outputN" name that does not exist yet.
        i = 1
        outputdir = "output" + str(i)
        while os.path.exists(outputdir):
            i += 1
            outputdir = "output" + str(i)

    if not os.path.exists(outputdir):
        os.makedirs(outputdir)

    # Seed the per-run log files with their header lines.
    # (os.path.join instead of string concatenation for portability.)
    with open(os.path.join(outputdir, "filterlog.txt"), "w") as f:
        f.write("FILTER MOLECULE TABLE OUTPUT LOG:\n")

    with open(os.path.join(outputdir, "eclog_umi.txt"), "w") as f:
        f.write("CellBC\tUR\tUC\tN_READS(R)\tN_READS(C)\n")

    with open(os.path.join(outputdir, "log_pickalleles.txt"), "w") as f:
        f.write("IntBC\tAR\tAC\tN_UMI(R)\tN_UMI(C)\n")

    return outputdir
def record_stats(moleculetable, outputdir, stage="Init"):
    """
    Collect summary distributions of reads and UMIs from an allele table.

    :param moleculetable: allele table (one row per UMI)
    :param outputdir: file path to the output directory (currently unused;
        kept for interface compatibility with the rest of the pipeline)
    :param stage: pipeline stage label (currently unused metadata)
    :return: tuple of numpy arrays: (reads per UMI, UMIs per intBC,
        UMIs per cellBC)
    """
    # (The previous version also computed total-UMI and intBC-per-cellBC
    # counts that were never used; those dead locals have been removed.)

    # Count UMIs per intBC, within each cellBC
    umi_per_ibc = np.array([])
    for n, g in tqdm(moleculetable.groupby(["cellBC"]), desc="Recording stats"):
        x = g.groupby(["intBC"]).agg({"UMI": "nunique"})["UMI"]
        if x.shape[0] > 0:
            umi_per_ibc = np.concatenate([umi_per_ibc, np.array(x)])

    # Count UMIs per cellBC, sorted descending
    umi_per_cbc = moleculetable.groupby(['cellBC']).agg({"UMI": 'count'}).sort_values("UMI", ascending=False)["UMI"]

    return np.array(moleculetable["readCount"]), umi_per_ibc, np.array(umi_per_cbc)
def filterCellBCs(moleculetable, outputdir, umiCountThresh = 10, verbose=True):
    """
    Filter out cell barcodes that have too few UMIs.

    :param moleculetable: allele table (one row per molecule/UMI)
    :param outputdir: file path to output directory; filterlog.txt there is
        appended to when verbose
    :param umiCountThresh: cellBCs with this many or fewer table rows (UMIs)
        are dropped
    :param verbose: whether to append before/after statistics to the log
    :return: (filtered allele table reindexed from 0,
        dict mapping each kept cellBC to its number of UMIs)
    """
    if verbose:
        with open(outputdir + "/filterlog.txt", "a") as f:
            f.write("FILTER CELL BARCODES:\n")
            f.write("Initial:\n")
            f.write("# Reads: " + str(sum(moleculetable["readCount"])) + "\n")
            f.write("# UMIs: " + str(moleculetable.shape[0]) + "\n")
            f.write("# Cell BCs: " + str(len(np.unique(moleculetable["cellBC"]))) + "\n")

    # Table rows (UMIs) per cellBC; a vectorized groupby replaces the
    # original per-cell Python loop.
    umis_per_cell = moleculetable.groupby(["cellBC"]).size()
    keep_mask = umis_per_cell > umiCountThresh

    # cellBC -> "good"/"bad" lookup used to fill in the status column.
    # NOTE: the original also tried to count filtered cells with
    # `np.where(dict_values == "good")`, which compares a dict_values object
    # to a string (always False) and so produced a wrong count; the count was
    # never used, so that dead computation is removed.
    cell_filter = keep_mask.map({True: "good", False: "bad"})
    cellBC2nM = umis_per_cell[keep_mask].to_dict()

    # apply the filter using the lookup table created above
    moleculetable["status"] = moleculetable["cellBC"].map(cell_filter)

    # filter based on status & reindex
    n_moleculetable = moleculetable[(moleculetable["status"] == "good")]
    n_moleculetable.index = [i for i in range(n_moleculetable.shape[0])]

    # log results
    if verbose:
        with open(outputdir + "/filterlog.txt", "a") as f:
            f.write("Post:\n")
            f.write("# Reads: " + str(sum(n_moleculetable["readCount"])) + "\n")
            f.write("# UMIs: " + str(n_moleculetable.shape[0]) + "\n")
            f.write("# Cell BCs: " + str(len(np.unique(n_moleculetable["cellBC"]))) + "\n\n")

    return n_moleculetable, cellBC2nM
def errorCorrectUMI(moleculetable, outputdir, bcDistThresh = 1, allelePropThresh = 0.2, verbose=True):
    """
    Error correct UMIs based on allele & cellBC information.

    Within each cellBC, any pair of UMIs that are within `bcDistThresh`
    Levenshtein distance of each other AND share the same intBC and allele is
    a correction candidate: the lower-read UMI's reads are folded into the
    higher-read UMI and the lower-read UMI's rows are dropped from the table.

    :param moleculetable: molecule table (one row per cellBC/UMI molecule)
    :param outputdir: file path to output directory; filterlog.txt and
        eclog_umi.txt in this directory are appended to when verbose
    :param bcDistThresh: maximum Levenshtein distance between two UMIs for
        them to be considered the same molecule
    :param allelePropThresh: maximum read proportion the minor UMI may hold
        (of the pair's total reads) for it to be corrected away
    :param verbose: whether to log statistics and each individual correction
    :return: allele table with corrected UMIs, reindexed from 0
    """
    # Log the pre-correction statistics.
    if verbose:
        with open(outputdir + "/filterlog.txt", "a") as f:
            f.write("ERROR CORRECT UMIs:\n")
            f.write("Initial:\n")
            f.write("# Reads: " + str(sum(moleculetable["readCount"])) + "\n")
            f.write("# UMIs: " + str(moleculetable.shape[0]) + "\n")
            f.write("# Cell BCs: " + str(len(np.unique(moleculetable["cellBC"]))) + "\n")

    num_UMI_corrected = 0    # number of UMI pairs collapsed
    num_reads_corrected = 0  # number of reads reassigned to a corrected UMI
    to_drop = np.array([])   # NOTE(review): unused leftover; rows are dropped inline below
    # NOTE: the groupby iterates a snapshot of the original table while the
    # `moleculetable` name is rebound by drop() inside the loop; the
    # membership guard on moleculetable.index below prevents re-processing
    # rows that were already dropped.
    for n, g in tqdm(moleculetable.groupby(["cellBC"])):
        # Let's correct UMIs by alleles -- if the same cellBC/UMI pair map to the same allele, let's try to assign the correct UMI barcode.
        #x1 = group.groupby(["UMI", "allele", "intBC"]).agg({'readCount': 'sum'}).sort_values("readCount", ascending=False).reset_index()
        # If we have more than one UMI in a cellBC (this should definitely be true)
        # Sort descending by reads so r1 always indexes the better-supported
        # UMI of any (r1, r2) pair considered below.
        g = g.sort_values("readCount", ascending=False).reset_index()
        if g.shape[0] > 0:
            corrected_r = []  # positions (within g) already corrected away
            # Pairwise comparison of all UMIs in this cell (O(k^2) per cell).
            for r1 in range(g.shape[0]):
                uBC1, allele1, iBC1 = g.loc[r1, "UMI"], g.loc[r1, "allele"], g.loc[r1, "intBC"]
                for r2 in range(r1 + 1, g.shape[0]):
                    uBC2, allele2, iBC2 = g.loc[r2, "UMI"], g.loc[r2, "allele"], g.loc[r2, "intBC"]
                    # Compute the levenshtein distance between both umis
                    bcl = Levenshtein.distance(uBC1, uBC2)
                    # If we've found two UMIs that are reasonably similar with the same allele and iBC, let's try to error correct.
                    if bcl <= bcDistThresh and allele1 == allele2 and iBC1 == iBC2:
                        totalCount = g.loc[[r1, r2], "readCount"].sum()
                        props = g.loc[[r1, r2], "readCount"] / totalCount
                        # Let's just error correct towards the more highly represented UMI iff the allele proportion of the lowly
                        # represented UMI is below some threshold
                        if props[r2] <= allelePropThresh and r1 not in corrected_r:
                            # Rows of the minor (to-be-dropped) and major
                            # (absorbing) UMI in the live table.
                            badlocs = moleculetable[(moleculetable["cellBC"] == n) & (moleculetable["UMI"] == uBC2)]
                            corrlocs = moleculetable[(moleculetable["cellBC"] == n) & (moleculetable["UMI"] == uBC1)]
                            corrected_r.append(r2)
                            # Guard: skip if the minor UMI's rows were already
                            # dropped by an earlier correction.
                            if len(badlocs.index.values) > 0 and badlocs.index.values[0] in moleculetable.index:
                                # Fold the minor UMI's reads into the major UMI
                                # and remove the minor UMI's rows.
                                moleculetable.loc[corrlocs.index.values, "readCount"] += badlocs["readCount"].iloc[0]
                                moleculetable = moleculetable.drop(badlocs.index.values)
                                #to_drop = np.concatenate((to_drop, badlocs.index.values))
                                num_UMI_corrected += 1
                                num_reads_corrected += g.loc[r2, "readCount"]
                                # Record the individual correction: cellBC,
                                # raw UMI, corrected UMI, and both read counts.
                                if verbose:
                                    with open(outputdir + "/eclog_umi.txt", "a") as f:
                                        f.write(n + "\t" + uBC2 + "\t" + uBC1 + "\t")
                                        f.write(str(g.loc[r2, "readCount"]) + "\t" + str(g.loc[r1, "readCount"]) + "\n")

    # log results
    if verbose:
        with open(outputdir + "/filterlog.txt", "a") as f:
            f.write("Post:\n")
            f.write("# Reads: " + str(sum(moleculetable["readCount"])) + "\n")
            f.write("# UMIs: " + str(moleculetable.shape[0]) + "\n")
            f.write("# Cell BCs: " + str(len(np.unique(moleculetable["cellBC"]))) + "\n\n")

    # Reindex the surviving rows from 0.
    moleculetable.index = [i for i in range(moleculetable.shape[0])]
    return moleculetable
def filterUMIs(moleculetable, outputdir, readCountThresh=100, verbose=True):
    """
    Filter out low-read UMIs.

    :param moleculetable: allele table to be filtered
    :param outputdir: file path to output directory; filterlog.txt there is
        appended to when verbose
    :param readCountThresh: read count threshold on which to filter UMIs;
        only rows with readCount strictly greater than this are kept
    :param verbose: whether to append before/after statistics to the log
    :return: filtered allele table, reindexed from 0
    """
    t0 = time.time()

    # log the pre-filter statistics
    if verbose:
        with open(outputdir + "/filterlog.txt", "a") as f:
            f.write("FILTER UMIS:\n")
            f.write("Initial:\n")
            f.write("# Reads: " + str(sum(moleculetable["readCount"])) + "\n")
            f.write("# UMIs: " + str(moleculetable.shape[0]) + "\n")
            f.write("# Cell BCs: " + str(len(np.unique(moleculetable["cellBC"]))) + "\n")

    # filter based on read count & reindex
    # (the original also initialized an unused `filteredReads` list; removed)
    n_moleculetable = moleculetable[(moleculetable["readCount"] > readCountThresh)]
    n_moleculetable.index = [i for i in range(n_moleculetable.shape[0])]

    # log the post-filter statistics
    if verbose:
        with open(outputdir + "/filterlog.txt", "a") as f:
            f.write("Post:\n")
            f.write("# Reads: " + str(sum(n_moleculetable["readCount"])) + "\n")
            f.write("# UMIs: " + str(n_moleculetable.shape[0]) + "\n")
            f.write("# Cell BCs: " + str(len(np.unique(n_moleculetable["cellBC"]))) + "\n\n")

    print("FILTER MOLECULE TIME: " + str(time.time() - t0))
    return n_moleculetable
def errorCorrectIntBC(moleculetable, outputdir, prop = 0.5, umiCountThresh = 10,
bcDistThresh = 1, verbose=True):
"""
Filter integration barcodes by their alleles and UMI proportion.
:param moleculetable: allele table
:param outputdir: file path to output directory
:param prop: proportion by which to filter integration barcodes
:param umiCountThresh: maximum umi count for which to correct barcodes
:param bcDistThresh: barcode distance threshold, to decide what's similar enough to error correct
:param verbose: boolean, indicating whether or not to write to output
:return: filtered allele table with bad UMIs thrown out/error corrected
"""
# log results
if verbose:
with open(outputdir + "/filterlog.txt", "a") as f:
f.write("ERROR CORRECT INTBCs:\n")
f.write("Initial:\n")
f.write("# Reads: " + str(sum(moleculetable["readCount"])) + "\n")
f.write("# UMIs: " + str(moleculetable.shape[0]) + "\n")
f.write("# Cell BCs: " + str(len(np.unique(moleculetable["cellBC"]))) + "\n")
# create index filter hash map
index_filter = {}
for n in moleculetable.index.values:
index_filter[n] = "good"
recovered = 0
numUMI_corrected = 0
for name, grp in tqdm(moleculetable.groupby(["cellBC"]), desc="Error Correcting intBCs"):
# name = cellBC
# grp = moleculetable[cellBC = name]
x1 = grp.groupby(["intBC", "allele"]).agg({"UMI": 'count', "readCount": 'sum'}).sort_values("UMI", ascending=False).reset_index()
if x1.shape[0] > 1:
badList = []
for r1 in range(x1.shape[0]):
iBC1, allele1 = x1.loc[r1, "intBC"], x1.loc[r1, "allele"]
for r2 in range(r1 + 1, x1.shape[0]):
iBC2, allele2 = x1.loc[r2, "intBC"], x1.loc[r2, "allele"]
bclDist = Levenshtein.distance(iBC1, iBC2)
if bclDist <= bcDistThresh and allele1 == allele2:
totalCount = x1.loc[[r1, r2], "UMI"].sum()
props = x1.loc[[r1, r2], "UMI"] / totalCount
umiCounts = x1.loc[[r1, r2], "UMI"]
# if the alleles are the same and the proportions are good, then let's error correct
if props[r2] < prop and umiCounts[r2] <= umiCountThresh:
bad_locs = moleculetable[(moleculetable["cellBC"] == name) & (moleculetable["intBC"] == iBC2) &
(moleculetable["allele"] == allele2)]
recovered += 1
numUMI_corrected += len(bad_locs.index.values)
moleculetable.loc[bad_locs.index.values, "intBC"] = iBC1
if verbose:
with open(outputdir + "/eclog_intbc.txt", "a") as f:
f.write(name + "\t" + iBC2 + "\t" + iBC1 + "\t")
f.write(str(x1.loc[r2, "UMI"]) + "\t" + str(x1.loc[r1, "UMI"]) + "\n")
# log data
if verbose:
with open(outputdir + "/filterlog.txt", "a") as f:
f.write("Post:\n")
f.write("# Reads: " + | |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and preprocess WMT17 ende training and evaluation datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import tarfile
import urllib
import six
import tensorflow as tf
from utils import tokenizer
# Data sources for training/evaluating the transformer translation model.
# If any of the training sources are changed, then either:
# 1) use the flag `--search` to find the best min count or
# 2) update the _TRAIN_DATA_MIN_COUNT constant.
# min_count is the minimum number of times a token must appear in the data
# before it is added to the vocabulary. "Best min count" refers to the value
# that generates a vocabulary set that is closest in size to _TARGET_VOCAB_SIZE.
_TRAIN_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/"
"training-parallel-nc-v12.tgz",
"input": "news-commentary-v12.de-en.en",
"target": "news-commentary-v12.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
"input": "commoncrawl.de-en.en",
"target": "commoncrawl.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
"input": "europarl-v7.de-en.en",
"target": "europarl-v7.de-en.de",
},
]
# Use pre-defined minimum count to generate subtoken vocabulary.
_TRAIN_DATA_MIN_COUNT = 6
_EVAL_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"input": "newstest2013.en",
"target": "newstest2013.de",
}
]
# Vocabulary constants
_TARGET_VOCAB_SIZE = 32768 # Number of subtokens in the vocabulary list.
_TARGET_THRESHOLD = 327 # Accept vocabulary if size is within this threshold
VOCAB_FILE = "vocab.ende.%d" % _TARGET_VOCAB_SIZE
# Strings to include in the generated files.
_PREFIX = "wmt32k"
_ENCODE_TAG = "encoded"
_TRAIN_TAG = "train"
_EVAL_TAG = "dev" # Following WMT and Tensor2Tensor conventions, in which the
# evaluation datasets are tagged as "dev" for development.
# Number of files to split train and evaluation data
_TRAIN_SHARDS = 100
_EVAL_SHARDS = 1
def find_file(path, filename, max_depth=5):
  """Return the full path of `filename` under `path`, or None if absent.

  Walks the tree rooted at `path`; directories more than `max_depth` levels
  below `path` are pruned from the search.
  """
  for current_dir, subdirs, entries in os.walk(path):
    if filename in entries:
      return os.path.join(current_dir, filename)

    # Prune the walk once we are deeper than max_depth below `path`.
    current_depth = current_dir[len(path) + 1:].count(os.sep)
    if current_depth > max_depth:
      subdirs[:] = []  # emptying dirs stops os.walk from descending
  return None
###############################################################################
# Download and extraction functions
###############################################################################
def get_raw_files(raw_dir, data_source):
  """Return raw files from source. Downloads/extracts if needed.

  Args:
    raw_dir: string directory to store raw files
    data_source: dictionary with
      {"url": url of compressed dataset containing input and target files
       "input": file with data in input language
       "target": file with data in target language}

  Returns:
    dictionary with
      {"inputs": list of files containing data in input language
       "targets": list of files containing corresponding data in target
       language}
  """
  input_paths = []
  target_paths = []
  for source in data_source:
    input_file, target_file = download_and_extract(
        raw_dir, source["url"], source["input"], source["target"])
    input_paths.append(input_file)
    target_paths.append(target_file)
  return {"inputs": input_paths, "targets": target_paths}
def download_report_hook(count, block_size, total_size):
  """Report hook for download progress.

  Args:
    count: current block number
    block_size: block size
    total_size: total size in bytes. urlretrieve passes a non-positive value
      (e.g. -1) when the server reports no Content-Length; the original code
      then raised ZeroDivisionError, so progress is simply skipped here.
  """
  if total_size <= 0:
    # Unknown total size -- a percentage cannot be computed.
    return
  percent = int(count * block_size * 100 / total_size)
  print("\r%d%%" % percent + " completed", end="\r")
def download_from_url(path, url):
  """Download content from a url.

  Args:
    path: string directory where file will be downloaded
    url: string url

  Returns:
    Full path to downloaded file
  """
  # BUG FIX: `urllib.urlretrieve` only exists on Python 2; on Python 3 it
  # moved to `urllib.request.urlretrieve`. Resolve the correct one locally so
  # the module works under both interpreters.
  try:
    from urllib.request import urlretrieve  # Python 3
  except ImportError:
    from urllib import urlretrieve  # Python 2

  filename = url.split("/")[-1]
  found_file = find_file(path, filename, max_depth=0)
  if found_file is not None:
    tf.logging.info("Already downloaded: %s (at %s)." % (url, found_file))
    return found_file

  filename = os.path.join(path, filename)
  tf.logging.info("Downloading from %s to %s." % (url, filename))
  # Download to a ".incomplete" name first, then rename, so a partially
  # downloaded file is never mistaken for a finished one.
  inprogress_filepath = filename + ".incomplete"
  inprogress_filepath, _ = urlretrieve(
      url, inprogress_filepath, reporthook=download_report_hook)
  # Print newline to clear the carriage return from the download progress.
  print()
  tf.gfile.Rename(inprogress_filepath, filename)
  return filename
def download_and_extract(path, url, input_filename, target_filename):
  """Extract files from downloaded compressed archive file.

  Args:
    path: string directory where the files will be downloaded
    url: url containing the compressed input and target files
    input_filename: name of file containing data in source language
    target_filename: name of file containing data in target language

  Returns:
    Full paths to extracted input and target files.

  Raises:
    OSError: if the download/extraction fails.
  """
  # Check if extracted files already exist in path
  input_file = find_file(path, input_filename)
  target_file = find_file(path, target_filename)
  if input_file and target_file:
    tf.logging.info("Already downloaded and extracted %s." % url)
    return input_file, target_file

  # Download archive file if it doesn't already exist.
  compressed_file = download_from_url(path, url)

  # Extract compressed files
  # NOTE(review): extractall() trusts member paths inside the archive, so a
  # malicious tarball could write outside `path` (path traversal). Acceptable
  # for the fixed statmt.org sources above, but worth hardening if the URLs
  # ever become user-configurable.
  tf.logging.info("Extracting %s." % compressed_file)
  with tarfile.open(compressed_file, "r:gz") as corpus_tar:
    corpus_tar.extractall(path)

  # Return filepaths of the requested files.
  input_file = find_file(path, input_filename)
  target_file = find_file(path, target_filename)
  if input_file and target_file:
    return input_file, target_file

  raise OSError("Download/extraction failed for url %s to path %s" %
                (url, path))
def txt_line_iterator(path):
  """Yield each line of the file at `path`, stripped of whitespace."""
  with tf.gfile.Open(path) as f:
    for raw_line in f:
      yield raw_line.strip()
def compile_files(raw_dir, raw_files, tag):
  """Compile raw files into a single file for each language.

  Args:
    raw_dir: Directory containing downloaded raw files.
    raw_files: Dict containing filenames of input and target data.
      {"inputs": list of files containing data in input language
       "targets": list of files containing corresponding data in target
       language}
    tag: String to append to the compiled filename.

  Returns:
    Full path of compiled input and target files.
  """
  tf.logging.info("Compiling files with tag %s." % tag)
  base_name = "%s-%s" % (_PREFIX, tag)
  input_compiled_file = os.path.join(raw_dir, base_name + ".lang1")
  target_compiled_file = os.path.join(raw_dir, base_name + ".lang2")

  with tf.gfile.Open(input_compiled_file, mode="w") as input_writer:
    with tf.gfile.Open(target_compiled_file, mode="w") as target_writer:
      # Concatenate each parallel pair of raw files into the two outputs.
      for input_file, target_file in zip(
          raw_files["inputs"], raw_files["targets"]):
        tf.logging.info("Reading files %s and %s." % (input_file, target_file))
        write_file(input_writer, input_file)
        write_file(target_writer, target_file)

  return input_compiled_file, target_compiled_file
def write_file(writer, filename):
  """Copy every line of `filename` to `writer`, newline-terminated."""
  for text_line in txt_line_iterator(filename):
    writer.write(text_line)
    writer.write("\n")
###############################################################################
# Data preprocessing
###############################################################################
def encode_and_save_files(
    subtokenizer, data_dir, raw_files, tag, total_shards):
  """Save data from files as encoded Examples in TFrecord format.

  Args:
    subtokenizer: Subtokenizer object that will be used to encode the strings.
    data_dir: The directory in which to write the examples
    raw_files: A tuple of (input, target) data files. Each line in the input
      and the corresponding line in target file will be saved in a tf.Example.
    tag: String that will be added onto the file names.
    total_shards: Number of files to divide the data into.

  Returns:
    List of all files produced.
  """
  # Create a file for each shard.
  filepaths = [shard_filename(data_dir, tag, n + 1, total_shards)
               for n in range(total_shards)]

  # Skip the (expensive) encoding pass entirely if every shard already exists.
  if all_exist(filepaths):
    tf.logging.info("Files with tag %s already exist." % tag)
    return filepaths

  tf.logging.info("Saving files with tag %s." % tag)
  input_file = raw_files[0]
  target_file = raw_files[1]

  # Write examples to each shard in round robin order.
  # Shards are written under ".incomplete" names and renamed at the end, so a
  # crashed run never leaves final-named partial shards behind.
  tmp_filepaths = [fname + ".incomplete" for fname in filepaths]
  writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filepaths]
  counter, shard = 0, 0
  for counter, (input_line, target_line) in enumerate(zip(
      txt_line_iterator(input_file), txt_line_iterator(target_file))):
    if counter > 0 and counter % 100000 == 0:
      tf.logging.info("\tSaving case %d." % counter)
    example = dict_to_example(
        {"inputs": subtokenizer.encode(input_line, add_eos=True),
         "targets": subtokenizer.encode(target_line, add_eos=True)})
    writers[shard].write(example.SerializeToString())
    # Advance to the next shard (round robin).
    shard = (shard + 1) % total_shards
  for writer in writers:
    writer.close()

  # Atomically promote the completed shards to their final names.
  for tmp_name, final_name in zip(tmp_filepaths, filepaths):
    tf.gfile.Rename(tmp_name, final_name)

  # NOTE(review): `counter` is the last 0-based index from enumerate, so this
  # log line under-reports the example count by one -- confirm if intended.
  tf.logging.info("Saved %d Examples", counter)
  return filepaths
def shard_filename(path, tag, shard_num, total_shards):
  """Create filename for data shard."""
  shard_basename = "{}-{}-{}-{:05d}-of-{:05d}".format(
      _PREFIX, _ENCODE_TAG, tag, shard_num, total_shards)
  return os.path.join(path, shard_basename)
def shuffle_records(fname):
  """Shuffle records in a single file."""
  tf.logging.info("Shuffling records in file %s" % fname)

  # Move the file aside so the shuffled output can reuse the original name.
  unshuffled_name = fname + ".unshuffled"
  tf.gfile.Rename(fname, unshuffled_name)

  # Read every record into memory, logging progress every 100k records.
  records = []
  for record in tf.python_io.tf_record_iterator(unshuffled_name):
    records.append(record)
    if len(records) % 100000 == 0:
      tf.logging.info("\tRead: %d", len(records))

  random.shuffle(records)

  # Write shuffled records back under the original file name.
  with tf.python_io.TFRecordWriter(fname) as record_writer:
    for index, record in enumerate(records):
      record_writer.write(record)
      if index > 0 and index % 100000 == 0:
        tf.logging.info("\tWriting record: %d" % index)

  tf.gfile.Remove(unshuffled_name)
def dict_to_example(dictionary):
  """Converts a dictionary of string->int to a tf.Example."""
  feature_map = {
      key: tf.train.Feature(int64_list=tf.train.Int64List(value=values))
      for key, values in six.iteritems(dictionary)
  }
  return tf.train.Example(features=tf.train.Features(feature=feature_map))
def all_exist(filepaths):
  """Returns true if all files in the list exist."""
  # all() short-circuits on the first missing file, like the original loop.
  return all(tf.gfile.Exists(fname) for fname in filepaths)
def make_dir(path):
if not tf.gfile.Exists(path):
tf.logging.info("Creating directory %s" % | |
'U') : 'UL1',
(15, 78, 'R', 'B') : 'RB1',
(15, 78, 'R', 'D') : 'DR1',
(15, 78, 'R', 'F') : 'RF1',
(15, 78, 'R', 'U') : 'UR1',
(15, 78, 'U', 'B') : 'UB1',
(15, 78, 'U', 'F') : 'UF1',
(15, 78, 'U', 'L') : 'UL1',
(15, 78, 'U', 'R') : 'UR1',
(16, 29, 'B', 'D') : 'DB2',
(16, 29, 'B', 'L') : 'LB2',
(16, 29, 'B', 'R') : 'RB2',
(16, 29, 'B', 'U') : 'UB2',
(16, 29, 'D', 'B') : 'DB0',
(16, 29, 'D', 'F') : 'DF0',
(16, 29, 'D', 'L') : 'DL0',
(16, 29, 'D', 'R') : 'DR0',
(16, 29, 'F', 'D') : 'DF2',
(16, 29, 'F', 'L') : 'LF2',
(16, 29, 'F', 'R') : 'RF2',
(16, 29, 'F', 'U') : 'UF2',
(16, 29, 'L', 'B') : 'LB0',
(16, 29, 'L', 'D') : 'DL2',
(16, 29, 'L', 'F') : 'LF0',
(16, 29, 'L', 'U') : 'UL2',
(16, 29, 'R', 'B') : 'RB0',
(16, 29, 'R', 'D') : 'DR2',
(16, 29, 'R', 'F') : 'RF0',
(16, 29, 'R', 'U') : 'UR2',
(16, 29, 'U', 'B') : 'UB0',
(16, 29, 'U', 'F') : 'UF0',
(16, 29, 'U', 'L') : 'UL0',
(16, 29, 'U', 'R') : 'UR0',
(20, 77, 'B', 'D') : 'DB0',
(20, 77, 'B', 'L') : 'LB0',
(20, 77, 'B', 'R') : 'RB0',
(20, 77, 'B', 'U') : 'UB0',
(20, 77, 'D', 'B') : 'DB2',
(20, 77, 'D', 'F') : 'DF2',
(20, 77, 'D', 'L') : 'DL2',
(20, 77, 'D', 'R') : 'DR2',
(20, 77, 'F', 'D') : 'DF0',
(20, 77, 'F', 'L') : 'LF0',
(20, 77, 'F', 'R') : 'RF0',
(20, 77, 'F', 'U') : 'UF0',
(20, 77, 'L', 'B') : 'LB2',
(20, 77, 'L', 'D') : 'DL0',
(20, 77, 'L', 'F') : 'LF2',
(20, 77, 'L', 'U') : 'UL0',
(20, 77, 'R', 'B') : 'RB2',
(20, 77, 'R', 'D') : 'DR0',
(20, 77, 'R', 'F') : 'RF2',
(20, 77, 'R', 'U') : 'UR0',
(20, 77, 'U', 'B') : 'UB2',
(20, 77, 'U', 'F') : 'UF2',
(20, 77, 'U', 'L') : 'UL2',
(20, 77, 'U', 'R') : 'UR2',
(2, 104, 'B', 'D') : 'DB2',
(2, 104, 'B', 'L') : 'LB2',
(2, 104, 'B', 'R') : 'RB2',
(2, 104, 'B', 'U') : 'UB2',
(2, 104, 'D', 'B') : 'DB0',
(2, 104, 'D', 'F') : 'DF0',
(2, 104, 'D', 'L') : 'DL0',
(2, 104, 'D', 'R') : 'DR0',
(2, 104, 'F', 'D') : 'DF2',
(2, 104, 'F', 'L') : 'LF2',
(2, 104, 'F', 'R') : 'RF2',
(2, 104, 'F', 'U') : 'UF2',
(2, 104, 'L', 'B') : 'LB0',
(2, 104, 'L', 'D') : 'DL2',
(2, 104, 'L', 'F') : 'LF0',
(2, 104, 'L', 'U') : 'UL2',
(2, 104, 'R', 'B') : 'RB0',
(2, 104, 'R', 'D') : 'DR2',
(2, 104, 'R', 'F') : 'RF0',
(2, 104, 'R', 'U') : 'UR2',
(2, 104, 'U', 'B') : 'UB0',
(2, 104, 'U', 'F') : 'UF0',
(2, 104, 'U', 'L') : 'UL0',
(2, 104, 'U', 'R') : 'UR0',
(22, 52, 'B', 'D') : 'DB0',
(22, 52, 'B', 'L') : 'LB0',
(22, 52, 'B', 'R') : 'RB0',
(22, 52, 'B', 'U') : 'UB0',
(22, 52, 'D', 'B') : 'DB2',
(22, 52, 'D', 'F') : 'DF2',
(22, 52, 'D', 'L') : 'DL2',
(22, 52, 'D', 'R') : 'DR2',
(22, 52, 'F', 'D') : 'DF0',
(22, 52, 'F', 'L') : 'LF0',
(22, 52, 'F', 'R') : 'RF0',
(22, 52, 'F', 'U') : 'UF0',
(22, 52, 'L', 'B') : 'LB2',
(22, 52, 'L', 'D') : 'DL0',
(22, 52, 'L', 'F') : 'LF2',
(22, 52, 'L', 'U') : 'UL0',
(22, 52, 'R', 'B') : 'RB2',
(22, 52, 'R', 'D') : 'DR0',
(22, 52, 'R', 'F') : 'RF2',
(22, 52, 'R', 'U') : 'UR0',
(22, 52, 'U', 'B') : 'UB2',
(22, 52, 'U', 'F') : 'UF2',
(22, 52, 'U', 'L') : 'UL2',
(22, 52, 'U', 'R') : 'UR2',
(23, 53, 'B', 'D') : 'DB1',
(23, 53, 'B', 'L') : 'LB1',
(23, 53, 'B', 'R') : 'RB1',
(23, 53, 'B', 'U') : 'UB1',
(23, 53, 'D', 'B') : 'DB1',
(23, 53, 'D', 'F') : 'DF1',
(23, 53, 'D', 'L') : 'DL1',
(23, 53, 'D', 'R') : 'DR1',
(23, 53, 'F', 'D') : 'DF1',
(23, 53, 'F', 'L') : 'LF1',
(23, 53, 'F', 'R') : 'RF1',
(23, 53, 'F', 'U') : 'UF1',
(23, 53, 'L', 'B') : 'LB1',
(23, 53, 'L', 'D') : 'DL1',
(23, 53, 'L', 'F') : 'LF1',
(23, 53, 'L', 'U') : 'UL1',
(23, 53, 'R', 'B') : 'RB1',
(23, 53, 'R', 'D') : 'DR1',
(23, 53, 'R', 'F') : 'RF1',
(23, 53, 'R', 'U') : 'UR1',
(23, 53, 'U', 'B') : 'UB1',
(23, 53, 'U', 'F') : 'UF1',
(23, 53, 'U', 'L') : 'UL1',
(23, 53, 'U', 'R') : 'UR1',
(24, 54, 'B', 'D') : 'DB2',
(24, 54, 'B', 'L') : 'LB2',
(24, 54, 'B', 'R') : 'RB2',
(24, 54, 'B', 'U') : 'UB2',
(24, 54, 'D', 'B') : 'DB0',
(24, 54, 'D', 'F') : 'DF0',
(24, 54, 'D', 'L') : 'DL0',
(24, 54, 'D', 'R') : 'DR0',
(24, 54, 'F', 'D') : 'DF2',
(24, 54, 'F', 'L') : 'LF2',
(24, 54, 'F', 'R') : 'RF2',
(24, 54, 'F', 'U') : 'UF2',
(24, 54, 'L', 'B') : 'LB0',
(24, 54, 'L', 'D') : 'DL2',
(24, 54, 'L', 'F') : 'LF0',
(24, 54, 'L', 'U') : 'UL2',
(24, 54, 'R', 'B') : 'RB0',
(24, 54, 'R', 'D') : 'DR2',
(24, 54, 'R', 'F') : 'RF0',
(24, 54, 'R', 'U') : 'UR2',
(24, 54, 'U', 'B') : 'UB0',
(24, 54, 'U', 'F') : 'UF0',
(24, 54, 'U', 'L') : 'UL0',
(24, 54, 'U', 'R') : 'UR0',
(27, 6, 'B', 'D') : 'DB2',
(27, 6, 'B', 'L') : 'LB2',
(27, 6, 'B', 'R') : 'RB2',
(27, 6, 'B', 'U') : 'UB2',
(27, 6, 'D', 'B') : 'DB0',
(27, 6, 'D', 'F') : 'DF0',
(27, 6, 'D', 'L') : 'DL0',
(27, 6, 'D', 'R') : 'DR0',
(27, 6, 'F', 'D') : 'DF2',
(27, 6, 'F', 'L') : 'LF2',
(27, 6, 'F', 'R') : 'RF2',
(27, 6, 'F', 'U') : 'UF2',
(27, 6, 'L', 'B') : 'LB0',
(27, 6, 'L', 'D') : 'DL2',
(27, 6, 'L', 'F') : 'LF0',
(27, 6, 'L', 'U') : 'UL2',
(27, 6, 'R', 'B') : 'RB0',
(27, 6, 'R', 'D') : 'DR2',
(27, 6, 'R', 'F') : 'RF0',
(27, 6, 'R', 'U') : 'UR2',
(27, 6, 'U', 'B') : 'UB0',
(27, 6, 'U', 'F') : 'UF0',
(27, 6, 'U', 'L') : 'UL0',
(27, 6, 'U', 'R') : 'UR0',
(28, 11, 'B', 'D') : 'DB1',
(28, 11, 'B', 'L') : 'LB1',
(28, 11, 'B', 'R') : 'RB1',
(28, 11, 'B', 'U') : 'UB1',
(28, 11, 'D', 'B') : 'DB1',
(28, 11, 'D', 'F') : 'DF1',
(28, 11, 'D', 'L') : 'DL1',
(28, 11, 'D', 'R') : 'DR1',
(28, 11, 'F', 'D') : 'DF1',
(28, 11, 'F', 'L') : 'LF1',
(28, 11, 'F', 'R') : 'RF1',
(28, 11, 'F', 'U') : 'UF1',
(28, 11, 'L', 'B') : 'LB1',
(28, 11, 'L', 'D') : 'DL1',
(28, 11, 'L', 'F') : 'LF1',
(28, 11, 'L', 'U') : 'UL1',
(28, 11, 'R', 'B') : 'RB1',
(28, 11, 'R', 'D') : 'DR1',
(28, 11, 'R', 'F') : 'RF1',
(28, 11, 'R', 'U') : 'UR1',
(28, 11, 'U', 'B') : 'UB1',
(28, 11, 'U', 'F') : 'UF1',
(28, 11, 'U', 'L') : 'UL1',
(28, 11, 'U', 'R') : 'UR1',
(29, 16, 'B', 'D') : 'DB0',
(29, 16, 'B', 'L') : 'LB0',
(29, 16, 'B', 'R') : 'RB0',
(29, 16, 'B', 'U') : 'UB0',
(29, 16, 'D', 'B') : 'DB2',
(29, 16, 'D', 'F') : 'DF2',
(29, 16, 'D', 'L') : 'DL2',
(29, 16, 'D', 'R') : 'DR2',
(29, 16, 'F', 'D') : 'DF0',
(29, 16, 'F', 'L') : 'LF0',
(29, 16, 'F', 'R') : 'RF0',
(29, 16, 'F', 'U') : 'UF0',
(29, 16, 'L', 'B') : | |
of the lines separating cells, by default 0
colour : str
The colour to use for the plot, by default None. If None, uses the
default colours.
time_ind : int, optional
In each experiment, the state update heatmap is stored at multiple
timesteps at certain intervals. This parameter controls which of the
update heatmaps to plot, with 0 being the first and -1 being the last
heatmap saved, by default -1.
Returns
-------
plt.figure, plt.Axes3D
The figure and axis plotten on
"""
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection="3d")
if colour is None:
colour = DEFAULT_COLOURS[0]
hp = next(iter(data["experiment_data"]))
update_states = []
for run in data["experiment_data"][hp]["runs"]:
update_states.append(run["update_states"][time_ind])
update_states = np.stack(update_states).mean(axis=0)
ind = np.where(update_states > max_)[0]
update_states[ind] = max_
update_states = update_states.reshape((100, 100))
update_states = ndimage.filters.gaussian_filter(update_states,
sigma=smooth)
x = np.linspace(-np.pi, np.pi, 100)
y = np.linspace(-8, 8, 100)
x, y = np.meshgrid(x, y)
ax.set_zlim3d(0, max_)
ax.view_init(view[0], view[1])
ax.grid(None)
if type_ == "surface":
ax.plot_surface(x, y, update_states, antialiased=False,
linewidth=linewidth, color=colour)
elif type_ == "wireframe":
ax.plot_wireframe(x, y, update_states, antialiased=False,
linewidth=linewidth, color=colour)
elif type_ == "bar":
# Get the min/max x/y values
env_config = data["experiment"]["environment"]
xmin, ymin = env_config["state_low"]
xmax, ymax = env_config["state_high"]
# Determine the width, depth, and height of barx
dx = [(xmax - xmin) / 100] * len(x.ravel())
dy = [(ymax - ymin) / 100] * len(y.ravel())
dz = update_states.ravel()
z = [0] * len(y.ravel())
ax.bar3d(x.ravel(), y.ravel(), z, dx=dx, dy=dy, dz=dz, color=colour)
fig.show()
return fig, ax
def return_distribution(data, type_, hp_ind, bins, figsize=(12, 6), xlim=None,
                        ylim=None, after=0, before=-1):
    """
    Plots the distribution over runs of the mean return for one
    hyperparameter setting.

    Parameters
    ----------
    data : dict
        The data dictionary containing the runs of the experiment
    type_ : str
        Which returns to use, one of 'train' or 'eval'; selects the
        '<type_>_episode_rewards' entry of each run
    hp_ind : int
        The hyperparameter settings index into data["experiment_data"]
        whose runs are plotted (also shown in the plot title)
    bins : Iterable, int
        The bins to use for the plot. If an Iterable, then each value in the
        Iterable is considered as a cutoff for bins. If an integer, separates
        the returns into that many bins
    figsize : tuple, optional
        The size of the figure to plot, by default (12, 6)
    xlim : 2-tuple of float, optional
        The cutoff points for the x-axis to plot between, by default None
    ylim : 2-tuple of float, optional
        The cutoff points for the y-axis to plot between, by default None
    after : int, optional
        Only consider returns from this episode onward, by default 0
    before : int, optional
        End of the slice of returns considered, by default -1.
        NOTE(review): the default of -1 makes the slice [after:-1], which
        excludes the final episode's return -- confirm this is intended
        (pass None to include it).

    Returns
    -------
    plt.figure, plt.Axes
        The figure and axis plotted on
    """
    # Average the selected window of returns within each run; the histogram
    # is over these per-run means.
    run_returns = []
    return_type = type_ + "_episode_rewards"
    for run in data["experiment_data"][hp_ind]["runs"]:
        run_returns.append(np.mean(run[return_type][after:before]))

    title = f"Learning Curve Distribution - HP Settings {hp_ind}"
    return _return_histogram(run_returns, bins, figsize, title, xlim, ylim)
def _return_histogram(run_returns, bins, figsize, title, xlim, ylim, kde=True):
    """Plot a histogram of per-run returns and return (figure, axis)."""
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot()

    ax.set_title(title)
    ax.set_xlabel("Average Return Per Run")
    ax.set_ylabel("Relative Frequency")

    _ = sns.histplot(run_returns, bins=bins, kde=kde)

    if xlim is not None:
        ax.set_xlim(xlim)
    if ylim is not None:
        ax.set_ylim(ylim)

    # Display counts as relative frequencies on the y-axis.
    def _to_relative_frequency(value, _pos):
        return "{:.2f}".format(value / len(run_returns))

    ax.yaxis.set_major_formatter(ticker.FuncFormatter(_to_relative_frequency))

    fig.show()
    return fig, ax
def inds_vs_hp_heatmap(dir_, type_, hp1, hp2, inds, env_config, agent_config,
figsize=(12, 8), annotate=True, fmt=".2g",
rasterize=False, cmap=CMAP, linewidths=0,
env_type="continuing", default_value=-1000, after=0,
before=None, fig=None, ax=None, vmin=None, vmax=None):
"""
Plots the mean returns of one hyperparameter vs another.
Given a list of hyperparameter indices and two hyperparameter names,
plots the mean returns generated by varying the second hyperparameter,
and leaving the first hyperparameter fixed as each hyperparameter index's
value. The plot is a heatmap. In this way, each row of the heatmap will
be a certain HP setting with a fixed HP, while the columns will be the
mean returns given the second HP varying for each HP index.
Parameters
----------
dir_ : str
The directory which contains all data dictionaries
type_ : str
The type of returns to consider, one of 'train' or 'eval'
hp1 : str
The name of the hyperparameter setting to vary on the x-axis
hp2 : str
The name of the hyperparameter to plot on the y-axis for each
hyperparameter settings index plotted
inds : Iterable of int
An iterable of hyperparameter indices used to plot
env_config : dict
The environment configuration file used to run the experiment
agent_config : dict
The agent configuration file used to run the experiment
figsize : tuple, optional
The size of the figure, by default (12, 8)
annotate : bool, optional
Whether each box in the heatmap should be annotated with its value,
by default True
fmt : str, optional
If annotating, the format to use, by default ".2g"
rasterize : bool, optional
Whether or not the final heatmap should be rasterized, by default False
cmap : str, optional
The colourmap to use to plot, by default CMAP
linewidths : int, optional
The width of the lines used to separate boxes in the heatmap, by
default 0
env_type : str, optional
Whether the environment is continuing or episodic, by default
"continuing". One of 'episodic', 'continuing'
default_value : int, optional
The default value to use for heatmap boxes when no valid second
hyperparameter can be used for the first hyperparameter, by default
-1000. This happens, for example in hyperparameter settings that would
have batch size > replay capacity.
after : int, optional
Only consider returns after this episode, by default 0
before : int, optional
Only consider returns before this episode, by default None
fig : plt.Figure, optional
The figure to plot on, by default None. If None, a new Figure is
created.
ax : plt.Axes, optional
The axis to plot on, by default None. If None, a new Axes is created.
vmin : float, optional
Minimum value to anchor the colourmap to, otherwise it is inferred
from the data.
vmax : float, optional
Maximum value to anchor the colourmap to, otherwise it is inferred
from the data.
Returns
-------
plt.Figure, plt.Axes
The figure and axis plotted on
Raises
------
ValueError
If the figure is not specified but the axis is
If the type of return considered is not one of "train" or "eval"
If an illegal environment type is given
"""
if fig is None and ax is not None:
raise ValueError("when ax is specified, fig should also be specified")
if type_ not in ("train", "eval"):
raise ValueError("type_ must be one of 'train' or 'eval'")
if env_type not in ("episodic", "continuing"):
raise ValueError("unknown environment type")
values = []
for ind in inds:
# Get all the combinations of hyperparameters with only hp_name varying
hp1_varying_only_settings = \
exp.get_varying_single_hyperparam(env_config, agent_config, hp1)
# Get the index (into the list of tuples with only hp_name varying) of
# the tuple which has hp_name varying only, but all other constant
# hyperparameters equal to those defined by hyperparam setting ind.
# So hp_combo_constant_hps_equal_ind is the hyperparam combination with
# all constant hyperparams equal to those in settings number ind.
hp_combo_constant_hps_equal_ind = \
next(filter(lambda x: ind in x, hp1_varying_only_settings))
combo_index = hp1_varying_only_settings.index(
hp_combo_constant_hps_equal_ind)
# Get the mean returns for each hyperparameter setting
mean_returns = []
for elem in hp1_varying_only_settings[combo_index]:
if elem is None:
# No hyperparameter setting for this combo of hyperparams
# e.g. batch size > buffer size
# Use the default value for the return
mean_returns.append(default_value)
continue
# Open the data file and compute the mean returns
agent = agent_config["agent_name"]
env = env_config["env_name"]
dir_ = dir_.rstrip("/")
file_ = dir_ + f"/{env}_{agent}_hp-{elem}.pkl"
with open(file_, "rb") as in_file:
data = pickle.load(in_file)
# Calculate and save the mean return for this HP setting
mean_return = exp.get_returns(data, type_, elem, env_type)
if before is not None:
mean_return = mean_return[:, after:before].mean()
else:
mean_return = mean_return[:, after:].mean()
mean_returns.append(mean_return)
values.append(mean_returns)
all_mean_returns = np.stack(values)
# Create the figure and axis
if fig is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
# Plot the heatmap | |
<filename>github_code/Discrete_GP_inv_1D_Burgers_equ_RK4.py
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 15 11:34:00 2019
@author: gpang
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
#from SALib.sample import sobol_sequence
import scipy as sci
import scipy.io as sio
class one_GP:
    def __init__(self):
        """Placeholder constructor; all state is supplied later via :meth:`model`."""
        pass
    def model(self, dataset, dt, prior_mean_train, prior_mean_test, previous_cov_mat, a, b, c, un_u, un_f, un_t, kernel_type = 'SE', is_noise = True):
        """Store the data set, RK scheme coefficients and GP options on the instance.

        No computation happens here; subsequent kernel methods read these
        attributes.

        dataset : dict with keys 'xu_train', 'yu_train', 'xf_train',
            'yf_train', 'xu_test', 'yu_test', 'xf_test', 'yf_test'
            (training/test inputs and targets for u and f).
        dt : time-step size of the discrete (Runge-Kutta) scheme.
        a, b, c : Butcher-tableau coefficients of the RK scheme
            (presumably 2-stage, given the 2x2 indexing used elsewhere —
            TODO confirm).
        un_u, un_f, un_t : previous-time-step solution values evaluated at the
            u-training, f-training and test points respectively.
        kernel_type : one of 'SE', 'Matern1', 'Matern2'.
        is_noise : whether to add a noise term to the GP model.
        """
        self.xu_train = dataset['xu_train']
        self.yu_train = dataset['yu_train']
        self.xf_train = dataset['xf_train']
        self.yf_train = dataset['yf_train']
        self.xu_test = dataset['xu_test']
        self.yu_test = dataset['yu_test']
        self.xf_test = dataset['xf_test']
        self.yf_test = dataset['yf_test']
        self.un_u = un_u
        self.un_f = un_f
        self.un_t = un_t
        self.kernel_type = kernel_type
        self.dt = dt
        self.prior_mean_train = prior_mean_train
        self.prior_mean_test = prior_mean_test
        self.previous_cov_mat=previous_cov_mat
        # Input dimensionality, taken from the f-training inputs.
        self.dim = self.xf_train.shape[1]
        self.is_noise = is_noise
        self.a = a
        self.b = b
        self.c = c
def kernel(self, X, Y, t1, equal=False, diag=False):
if self.kernel_type == 'SE':
if diag == False:
return tf.exp(-0.5* (X-Y.T)**2/t1**2)
else:
return tf.ones((X.shape[0],1),dtype=tf.float64)
elif self.kernel_type == 'Matern1':
dist = tf.sqrt(self.square_dist(X,Y,t1,equal))
return (1.0+3.0**0.5*dist)*tf.exp(-3.0**0.5*dist)
elif self.kernel_type == 'Matern2':
dist = tf.sqrt(self.square_dist(X,Y,t1,equal))
return (1.0+5.0**0.5*dist+5.0/3.0*dist**2)*tf.exp(-5.0**0.5*dist)
def kx(self, X, Y, t1, diag=False):
Y = Y.T
if diag == False:
return (Y-X)/t1**2*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return tf.zeros((X.shape[0],1),dtype=tf.float64)
def ky(self, X, Y, t1, diag=False):
Y = Y.T
if diag == False:
return (X-Y)/t1**2*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return tf.zeros((X.shape[0],1),dtype=tf.float64)
def kxx(self, X, Y, t1, diag=False):
Y = Y.T
if diag==False:
return (-1.0/t1**2+(X-Y)**2/t1**4)*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return -1.0/t1**2 * tf.ones((X.shape[0],1),dtype=tf.float64)
def kyy(self, X, Y, t1, diag=False):
Y = Y.T
if diag==False:
return (-1.0/t1**2+(X-Y)**2/t1**4)*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return -1.0/t1**2 * tf.ones((X.shape[0],1),dtype=tf.float64)
def kxy(self, X, Y, t1, diag=False):
Y = Y.T
if diag==False:
return (1.0/t1**2-(X-Y)**2/t1**4)*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return 1.0/t1**2*tf.ones((X.shape[0],1),dtype=tf.float64)
def kyyx(self, X, Y, t1, diag=False):
Y = Y.T
if diag==False:
return (3*(X-Y)/t1**4-(X-Y)**3/t1**6)*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return tf.zeros((X.shape[0],1),dtype=tf.float64)
def kyxx(self, X, Y, t1, diag=False):
Y = Y.T
if diag==False:
return (3*(Y-X)/t1**4+(X-Y)**3/t1**6)*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return tf.zeros((X.shape[0],1),dtype=tf.float64)
def kxxyy(self, X, Y, t1, diag=False):
Y = Y.T
if diag==False:
return (3.0/t1**4-6*(X-Y)**2/t1**6+(X-Y)**4/t1**8)*tf.exp(-0.5*(X-Y)**2/t1**2)
else:
return 3.0/t1**4*tf.ones((X.shape[0],1),dtype=tf.float64)
    def Lap2_kernel(self, X, Y, t1, lambda1, lambda2, un_x, un_y, equal=False, diag=False):
        """Covariance of the linearised Burgers operator applied to BOTH kernel
        arguments: cov(L u(x), L u(y)) with L = lambda1*u_n*d/dx - lambda2*d2/dx2,
        where u_n (``un_x``/``un_y``) is the previous-step solution.
        """
        # Diagonal matrices of previous-step solution values, used to scale
        # the derivative kernels row-/column-wise in the full-matrix branch.
        unx = np.ndarray.flatten(un_x)
        uny = np.ndarray.flatten(un_y)
        unx = tf.diag(unx)
        uny = tf.diag(uny)
        if self.kernel_type == 'SE':
            if diag == False:
                k = lambda1**2*tf.matmul(tf.matmul(unx,self.kxy(X,Y,t1,diag)),uny)-lambda1*lambda2*tf.matmul(unx,self.kyyx(X,Y,t1,diag))\
                -lambda1*lambda2*tf.matmul(self.kyxx(X,Y,t1,diag),uny)+lambda2**2*self.kxxyy(X,Y,t1,diag)
            else:
                # Diagonal-only: use the raw (column-vector) un_x/un_y values,
                # not the diagonalised matrices above.
                k = lambda1**2* un_x**2*self.kxy(X,Y,t1,diag)-lambda1*lambda2*un_x*self.kyyx(X,Y,t1,diag)\
                -lambda1*lambda2*un_y*self.kyxx(X,Y,t1,diag)+lambda2**2*self.kxxyy(X,Y,t1,diag)
        # NOTE(review): only the 'SE' branch defines k; any other kernel_type
        # would raise NameError here.
        return k
    def Lap1_kernel(self, X, Y, t1, lambda1, lambda2, un_x, un_y, equal=False, diag=False): ## -\Delta rather than \Delta
        """Covariance with the operator L = lambda1*u_n*d/dx - lambda2*d2/dx2
        applied to the FIRST kernel argument only: cov(L u(x), u(y)).
        Note the sign convention in the trailing comment: -Delta, not Delta.
        """
        if self.kernel_type == 'SE':
            unx = np.ndarray.flatten(un_x)
            uny = np.ndarray.flatten(un_y)
            unx = tf.diag(unx)
            # NOTE(review): uny is built but never used in this method.
            uny = tf.diag(uny)
            if diag == False:
                k = lambda1*tf.matmul(unx,self.kx(X,Y,t1,diag))-lambda2*self.kxx(X,Y,t1,diag)
            else:
                # Diagonal-only: scale elementwise by the raw un_x values.
                k = lambda1*un_x*self.kx(X,Y,t1,diag)-lambda2*self.kxx(X,Y,t1,diag)
        # NOTE(review): k is only bound in the 'SE' branch (NameError otherwise).
        return k
    def Lap1_kernel_prime(self, X, Y, t1, lambda1, lambda2, un_x, un_y, equal=False, diag=False): ## -\Delta rather than \Delta
        """Covariance with the operator L = lambda1*u_n*d/dx - lambda2*d2/dx2
        applied to the SECOND kernel argument only: cov(u(x), L u(y)).
        Mirror image of :meth:`Lap1_kernel`.
        """
        if self.kernel_type == 'SE':
            unx = np.ndarray.flatten(un_x)
            uny = np.ndarray.flatten(un_y)
            # NOTE(review): unx is built but never used in this method.
            unx = tf.diag(unx)
            uny = tf.diag(uny)
            if diag == False:
                k = lambda1*tf.matmul(self.ky(X,Y,t1,diag),uny)-lambda2*self.kyy(X,Y,t1,diag)
            else:
                # Diagonal-only: scale elementwise by the raw un_y values.
                k = lambda1*un_y*self.ky(X,Y,t1,diag)-lambda2*self.kyy(X,Y,t1,diag)
        # NOTE(review): k is only bound in the 'SE' branch (NameError otherwise).
        return k
    def kernel_uf_train(self, Xu, Xf, t1, t3, t5, a, b, c, lambda1, lambda2, un_u, un_f, dt, diag=False):
        """Assemble the full training covariance of the joint GP over the
        three RK stage values (u3, u2, u1) and the corresponding f blocks.

        Xu : sequence of three training-input arrays, one per RK stage.
        Xf : training inputs for the f observations.
        t1, t3, t5 : length-scales of the three stage kernels.
        a, b : Butcher-tableau coefficients (a is 2x2, b has 2 entries).
        lambda1, lambda2 : PDE parameters (advection / diffusion weights).
        un_u, un_f : previous-step solution at the u- and f-points.
        dt : time step.
        diag : if True, return only the stacked diagonals of the six
            self-covariance blocks instead of the full joint matrix.
        """
        if self.kernel_type == 'SE':
            if diag == False:
                # Plain covariances of the three stage values.
                ku3u3 = self.kernel(Xu[2], Xu[2], t1, equal=True)
                ku2u2 = self.kernel(Xu[1], Xu[1], t3, equal=True)
                ku1u1 = self.kernel(Xu[0], Xu[0], t5, equal=True)
                # Self-covariances of the f blocks: base kernel plus the
                # RK-weighted first/second order operator terms.
                kf3f3 = self.kernel(Xf, Xf, t1, equal=True) \
                + dt**2*b[0]**2*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, equal=True) \
                + dt**2*b[1]**2*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, equal=True)
                kf2f2 = self.kernel(Xf, Xf, t3, equal=True) \
                + dt*a[1,1]*self.Lap1_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, equal=True) \
                + dt*a[1,1]*self.Lap1_kernel_prime(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, equal=True)\
                +dt**2*a[1,0]**2*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, equal=True) \
                +dt**2*a[1,1]**2*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, equal=True)
                kf1f1 = self.kernel(Xf, Xf, t5, equal=True) \
                +dt*a[0,0]*self.Lap1_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, equal=True)\
                +dt*a[0,0]*self.Lap1_kernel_prime(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, equal=True)\
                +dt**2*a[0,1]**2*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, equal=True) \
                +dt**2*a[0,0]**2*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, equal=True)
                # Cross-covariances between f blocks and stage values.
                kf3u3 = self.kernel(Xf, Xu[2], t1)
                kf3u2 = dt*b[1]*self.Lap1_kernel(Xf, Xu[1], t3, lambda1, lambda2, un_f, un_u[1])
                kf2u2 = self.kernel(Xf, Xu[1], t3) + dt*a[1,1]*self.Lap1_kernel(Xf,Xu[1],t3,lambda1, lambda2, un_f, un_u[1])
                kf1u2 = dt*a[0,1]*self.Lap1_kernel(Xf, Xu[1], t3, lambda1, lambda2, un_f, un_u[1])
                kf3u1 = dt*b[0]*self.Lap1_kernel(Xf, Xu[0], t5, lambda1, lambda2, un_f, un_u[0])
                kf2u1 = dt*a[1,0]*self.Lap1_kernel(Xf, Xu[0], t5, lambda1, lambda2, un_f, un_u[0])
                kf1u1 = self.kernel(Xf, Xu[0], t5) + dt*a[0,0]*self.Lap1_kernel(Xf, Xu[0], t5, lambda1, lambda2, un_f, un_u[0])
                # Cross-covariances among the f blocks themselves.
                kf2f3 = dt*b[1]*self.Lap1_kernel_prime(Xf, Xf, t3, lambda1, lambda2, un_f, un_f) \
                +dt**2*b[0]*a[1,0]*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f) \
                +dt**2*b[1]*a[1,1]*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f)
                kf1f3 = dt*b[0]*self.Lap1_kernel_prime(Xf, Xf, t5, lambda1, lambda2, un_f, un_f) \
                + dt**2*b[0]*a[0,0]*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f) \
                + dt**2*b[1]*a[0,1]*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f)
                kf1f2 = dt*a[0,1]*self.Lap1_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f) \
                +dt*a[1,0]*self.Lap1_kernel_prime(Xf, Xf, t5, lambda1, lambda2, un_f, un_f) \
                + dt**2*a[1,0]*a[0,0]*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f)\
                + dt**2*a[1,1]*a[0,1]*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f)
                # Zero blocks: stage values at different stages are a priori
                # uncorrelated, as are u3 and the lower f blocks.
                zu3u2 = tf.zeros((Xu[2].shape[0],Xu[1].shape[0]),dtype=tf.float64)
                zu3u1 = tf.zeros((Xu[2].shape[0],Xu[0].shape[0]),dtype=tf.float64)
                zu2u1 = tf.zeros((Xu[1].shape[0],Xu[0].shape[0]),dtype=tf.float64)
                zu3f = tf.zeros((Xu[2].shape[0],Xf.shape[0]),dtype=tf.float64)
                zfu3 = tf.zeros((Xf.shape[0],Xu[2].shape[0]),dtype=tf.float64)
                # Stack the 6x6 block matrix row by row (order: u3, u2, u1,
                # f3, f2, f1).
                k1 = tf.concat( (ku3u3, zu3u2, zu3u1, tf.transpose(kf3u3), zu3f, zu3f),axis=1)
                k2 = tf.concat( (tf.transpose(zu3u2), ku2u2, zu2u1, tf.transpose(kf3u2), tf.transpose(kf2u2), tf.transpose(kf1u2)),axis=1)
                k3 = tf.concat( (tf.transpose(zu3u1), tf.transpose(zu2u1), ku1u1, tf.transpose(kf3u1), tf.transpose(kf2u1), tf.transpose(kf1u1)),axis=1)
                k4 = tf.concat( (kf3u3, kf3u2, kf3u1, kf3f3, tf.transpose(kf2f3), tf.transpose(kf1f3)),axis=1)
                k5 = tf.concat( (zfu3, kf2u2, kf2u1, kf2f3, kf2f2, tf.transpose(kf1f2)),axis=1)
                k6 = tf.concat( (zfu3, kf1u2, kf1u1, kf1f3, kf1f2, kf1f1),axis=1)
                k = tf.concat((k1,k2,k3,k4,k5,k6),axis=0)
                return k
            else:
                # Diagonal-only path: same six self-covariance blocks, with
                # the symmetric Lap1/Lap1' pairs merged into a factor of 2.
                ku3u3 = self.kernel(Xu[2], Xu[2], t1, diag=True)
                ku2u2 = self.kernel(Xu[1], Xu[1], t3, diag=True)
                ku1u1 = self.kernel(Xu[0], Xu[0], t5, diag=True)
                kf3f3 = self.kernel(Xf, Xf, t1, diag=True) \
                + dt**2*b[0]**2*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, diag=True) \
                + dt**2*b[1]**2*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, diag=True)
                kf2f2 = self.kernel(Xf, Xf, t3, diag=True) \
                + 2.0*dt*a[1,1]*self.Lap1_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, diag=True) \
                +dt**2*a[1,0]**2*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, diag=True) \
                +dt**2*a[1,1]**2*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, diag=True)
                kf1f1 = self.kernel(Xf, Xf, t5, diag=True) \
                +2.0*dt*a[0,0]*self.Lap1_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, diag=True)\
                +dt**2*a[0,1]**2*self.Lap2_kernel(Xf, Xf, t3, lambda1, lambda2, un_f, un_f, diag=True) \
                +dt**2*a[0,0]**2*self.Lap2_kernel(Xf, Xf, t5, lambda1, lambda2, un_f, un_f, diag=True)
                return tf.concat((ku3u3,ku2u2,ku1u1,kf3f3, kf2f2, kf1f1),axis=0)
    def kernel_u_test(self, Xt, Xu, Xf, t1, t3, t5, a, b, c, lambda1, lambda2, un_u, un_f, un_t, dt):
        """Cross-covariance between the u test points ``Xt`` (for each of the
        three RK stages) and all training blocks (u3, u2, u1, f3, f2, f1).

        Returns a 3x6 block matrix stacked as three rows, one per stage.
        """
        if self.kernel_type == 'SE':
            # Test-vs-u-training covariances (one kernel per stage).
            ku3u3 = self.kernel(Xt, Xu[2], t1)
            ku2u2 = self.kernel(Xt, Xu[1], t3)
            ku1u1 = self.kernel(Xt, Xu[0], t5)
            # Test-vs-f-training covariances; the operator acts on the second
            # (training) argument, hence Lap1_kernel_prime.
            ku3f3 = self.kernel(Xt, Xf, t1)
            ku2f3 = dt*b[1]*self.Lap1_kernel_prime(Xt, Xf, t3,lambda1, lambda2, un_t, un_f )
            ku2f2 = self.kernel(Xt, Xf, t3) + dt*a[1,1]*self.Lap1_kernel_prime(Xt,Xf,t3,lambda1, lambda2, un_t, un_f)
            ku2f1 = dt*a[0,1]*self.Lap1_kernel_prime(Xt, Xf, t3, lambda1, lambda2, un_t, un_f)
            ku1f3 = dt*b[0]*self.Lap1_kernel_prime(Xt, Xf, t5, lambda1, lambda2, un_t, un_f)
            ku1f2 = dt*a[1,0]*self.Lap1_kernel_prime(Xt, Xf, t5, lambda1, lambda2, un_t, un_f)
            ku1f1 = self.kernel(Xt, Xf, t5) + dt*a[0,0]*self.Lap1_kernel_prime(Xt, Xf, t5, lambda1, lambda2, un_t, un_f)
            # Zero blocks for a priori uncorrelated combinations.
            zuu3 = tf.zeros((Xt.shape[0],Xu[2].shape[0]),dtype=tf.float64)
            zuu2 = tf.zeros((Xt.shape[0],Xu[1].shape[0]),dtype=tf.float64)
            zuu1 = tf.zeros((Xt.shape[0],Xu[0].shape[0]),dtype=tf.float64)
            zuf = tf.zeros((Xt.shape[0],Xf.shape[0]),dtype=tf.float64)
            # One row per stage, columns ordered u3, u2, u1, f3, f2, f1.
            k1 = tf.concat( (ku3u3, zuu2, zuu1, ku3f3, zuf, zuf),axis=1)
            k2 = tf.concat( (zuu3, ku2u2, zuu1, ku2f3, ku2f2, ku2f1),axis=1)
            k3 = tf.concat( (zuu3, zuu2, ku1u1, ku1f3, ku1f2, ku1f1),axis=1)
            k = tf.concat((k1,k2,k3),axis=0)
            return k
def kernel_f_test(self, Xt, Xu, Xf, t1, t3, t5, a, b, c, lambda1, lambda2, un_u, un_f, un_t, dt):
if self.kernel_type == 'SE':
kf3f3 = self.kernel(Xt, Xf, t1) \
+ dt**2*b[0]**2*self.Lap2_kernel(Xt, Xf, t5, lambda1, lambda2, un_t, un_f) \
+ dt**2*b[1]**2*self.Lap2_kernel(Xt, Xf, t3, lambda1, lambda2, un_t, un_f)
kf2f2 = self.kernel(Xt, Xf, t3) \
+ dt*a[1,1]*self.Lap1_kernel(Xt, Xf, t3, lambda1, lambda2, un_t, un_f) \
+ dt*a[1,1]*self.Lap1_kernel_prime(Xt, Xf, t3, lambda1, lambda2, un_t, un_f)\
+dt**2*a[1,0]**2*self.Lap2_kernel(Xt, Xf, t5, lambda1, lambda2, un_t, un_f) \
+dt**2*a[1,1]**2*self.Lap2_kernel(Xt, Xf, t3, lambda1, lambda2, un_t, un_f)
kf1f1 = self.kernel(Xt, Xf, t5) \
+dt*a[0,0]*self.Lap1_kernel(Xt, Xf, t5, lambda1, lambda2, un_t, un_f)\
+dt*a[0,0]*self.Lap1_kernel_prime(Xt, Xf, t5, lambda1, lambda2, un_t, un_f)\
+dt**2*a[0,1]**2*self.Lap2_kernel(Xt, Xf, t3, lambda1, lambda2, un_t, un_f) \
+dt**2*a[0,0]**2*self.Lap2_kernel(Xt, Xf, t5, lambda1, lambda2, un_t, un_f)
kf3u3 = self.kernel(Xt, Xu[2], t1)
kf3u2 = dt*b[1]*self.Lap1_kernel(Xt, Xu[1], t3, lambda1, lambda2, un_t, un_u[1])
kf2u2 = self.kernel(Xt, Xu[1], t3) + dt*a[1,1]*self.Lap1_kernel(Xt,Xu[1],t3,lambda1, lambda2, un_t, un_u[1])
kf1u2 = dt*a[0,1]*self.Lap1_kernel(Xt, Xu[1], t3, lambda1, lambda2, un_t, un_u[1])
kf3u1 = dt*b[0]*self.Lap1_kernel(Xt, Xu[0], t5, | |
"""
Dataset Specification
Lookup information from the database about the data, particularly which files
are contained in a specific dataset (defined by a `run` global flag) and what
each correlator input is connected to.
Dataspec Format
===============
.. deprecated:: pass1
Use `run` global flag instead.
A dataspec is just a dictionary with three required keys.
:name:
A short name given to the dataset.
:instrument:
The name of the instrument that took the data.
:timerange:
A specification of the time range, or ranges that make up the dataset.
The time range is specified either as a dictionary with `start` and `end`
keys, containing datetime objects (in UTC). Or it can be a list of such
dictionaries, to specify multiple time ranges to include. This can be contained
in a dataspec YAML file, and loaded using :class:`LoadDataspec`. Example:
.. code-block:: yaml
datasets:
- name: A
instrument: blanchard
timerange:
- start: 2014-07-26 03:00:00
end: 2014-07-28 01:00:00
- start: 2014-07-28 11:00:00
end: 2014-07-31 00:00:00
"""
import os
from caput import mpiutil, config, pipeline
from draco.core import task
from chimedb import data_index as di
from ch_util import tools, ephemeris, finder, layout
_DEFAULT_NODE_SPOOF = {"cedar_online": "/project/rpp-krs/chime/chime_online/"}
def _force_list(val):
"""Ensure configuration property is a list."""
if val is None:
return []
elif hasattr(val, "__iter__"):
return val
else:
return [val]
class QueryDatabase(task.MPILoggedTask):
    """Find files from specified database queries.

    This routine will query the database as specified in the runtime
    configuration file.

    Attributes
    ----------
    node_spoof : dictionary
        (default: {'cedar_online': '/project/rpp-krs/chime/chime_online/'} )
        host and directory in which to find data.
    start_time, end_time : string (default: None)
        start and end times to restrict the database search to
        can be in any format ensure_unix will support, including e.g.
        20190116T150323 and 2019-1-16 08:03:23 -7
    acqtype : string (default: 'corr')
        Type of acquisition. Options for acqtype are: 'corr', 'hk', 'weather',
        'rawadc', 'gain', 'flaginput', 'digitalgain'.
    instrument : string (optional)
        Set the instrument name. Common ArchiveInst names are: 'chimeN2',
        'chimestack', 'chime26m', 'chimetiming', 'chimecal', 'mingun' etc.
        While acqtype returns all 'corr' data, one must specify the instrument
        to get e.g. only stacked data (i.e. instrument = 'chimestack')
    source_26m : string (default: None)
        holography source to include. If None, do not include holography data.
    exclude_daytime : bool (default: False)
        exclude daytime data
    exclude_sun : bool (default: False)
        exclude data around Sun
    exclude_sun_time_delta : float (default: None)
        time_delta parameter passed to exclude_sun()
    exclude_sun_time_delta_rise_set : float (default: None)
        time_delta_rise_set parameter passed to exclude_sun()
    exclude_transits : list of string or float (default: [])
        if set, call exclude_transits(). Pass list of sources or RA to exclude.
    exclude_transits_time_delta : list of float (default : [])
        time in seconds to exclude around each source transit given in
        `exclude_transits`. if single value is passed, then that value will
        be applied to all source transits.
    include_transits : list of string or float (default : [])
        if set, call include_transits(). Pass list of sources or RA to include.
    include_transits_time_delta : list of float (default : [])
        time in seconds to include around each source transit given in
        `include_transits`. if single value is passed, then that value will
        be applied to all source transits.
    start_RA, end_RA : float (default: None)
        starting and ending RA to include. Both values must be set or
        neither has any effect (a warning is emitted if only one is set).
    run_name : string (default: None)
        run name to include. If used, all other parameters will be ignored.
    accept_all_global_flags : bool (default: False)
        Accept all global flags. Due to a bug as of 2019-1-16, this may need to
        be set to True
    exclude_data_flag_types: list of string
        Reject time intervals that overlap with DataFlags of these types.
    return_intervals : bool (default: False)
        Return the full interval from the Finder. Otherwise only a list of file names.
    """

    return_intervals = config.Property(proptype=bool, default=False)
    node_spoof = config.Property(proptype=dict, default=_DEFAULT_NODE_SPOOF)
    acqtype = config.Property(proptype=str, default="corr")
    instrument = config.Property(proptype=str, default=None)
    source_26m = config.Property(proptype=str, default=None)
    start_time = config.Property(default=None)
    end_time = config.Property(default=None)
    start_csd = config.Property(proptype=float, default=None)
    end_csd = config.Property(proptype=float, default=None)
    exclude_daytime = config.Property(proptype=bool, default=False)
    exclude_sun = config.Property(proptype=bool, default=False)
    exclude_sun_time_delta = config.Property(proptype=float, default=None)
    exclude_sun_time_delta_rise_set = config.Property(proptype=float, default=None)
    exclude_transits = config.Property(proptype=_force_list, default=[])
    exclude_transits_time_delta = config.Property(proptype=_force_list, default=[])
    include_transits = config.Property(proptype=_force_list, default=[])
    include_transits_time_delta = config.Property(proptype=_force_list, default=[])
    start_RA = config.Property(proptype=float, default=None)
    end_RA = config.Property(proptype=float, default=None)
    run_name = config.Property(proptype=str, default=None)
    accept_all_global_flags = config.Property(proptype=bool, default=False)
    exclude_data_flag_types = config.Property(proptype=list, default=[])

    def _filter_transits(self, f, sources, time_deltas, include):
        """Apply include/exclude transit filters to the Finder `f`.

        Parameters
        ----------
        f : finder.Finder
            Finder instance to modify in place.
        sources : list of str or float
            Source names (looked up in `ephemeris.source_dictionary`) or RAs.
        time_deltas : list of float
            One time window (seconds) per source, or a single value applied
            to all sources, or empty for the Finder default.
        include : bool
            True to include the transits, False to exclude them.

        Raises
        ------
        ValueError
            If `time_deltas` has more than one entry but fewer than
            `sources`.
        """
        name = "include_transits" if include else "exclude_transits"
        ntime_delta = len(time_deltas)
        if (ntime_delta > 1) and (ntime_delta < len(sources)):
            raise ValueError(
                "Must specify `time_delta` for each source in "
                "`%s` or provide single value for all sources." % name
            )

        for ss, src in enumerate(sources):
            tdelta = time_deltas[ss % ntime_delta] if ntime_delta > 0 else None
            bdy = (
                ephemeris.source_dictionary[src] if isinstance(src, str) else src
            )
            if include:
                f.include_transits(bdy, time_delta=tdelta)
            else:
                f.exclude_transits(bdy, time_delta=tdelta)

    def setup(self):
        """Query the database and fetch the files

        Returns
        -------
        files : list
            List of files to load
        """
        files = None

        # Query the database on rank=0 only, and broadcast to everywhere else
        if mpiutil.rank0:
            if self.run_name:
                # Delegate to QueryRun.setup using this instance, which
                # provides the run_name/node_spoof attributes it reads.
                # (Previously `self.QueryRun()` raised AttributeError:
                # QueryRun is a module-level class, not an attribute.)
                return QueryRun.setup(self)

            layout.connect_database()
            f = finder.Finder(node_spoof=self.node_spoof)
            f.filter_acqs(di.AcqType.name == self.acqtype)

            if self.instrument is not None:
                f.filter_acqs(di.ArchiveInst.name == self.instrument)

            if self.accept_all_global_flags:
                f.accept_all_global_flags()

            # Use start and end times if set, or try and use the start and
            # end CSDs.
            st = et = None
            if self.start_time:
                st, et = self.start_time, self.end_time
            elif self.start_csd:
                st = ephemeris.csd_to_unix(self.start_csd)
                et = (
                    ephemeris.csd_to_unix(self.end_csd)
                    if self.end_csd is not None
                    else None
                )

            # Note: include_time_interval includes the specified time interval
            # Using this instead of set_time_range, which only narrows the interval
            # f.include_time_interval(self.start_time, self.end_time)
            if st is not None:
                # Guard added: previously set_time_range(st, et) was called
                # unconditionally and raised NameError when neither
                # start_time nor start_csd was configured.
                f.set_time_range(st, et)

            if self.start_RA and self.end_RA:
                f.include_RA_interval(self.start_RA, self.end_RA)
            elif self.start_RA or self.end_RA:
                # Fixed: previously tested start_RA twice, so a lone end_RA
                # never triggered this warning.
                self.log.warning(
                    "One but not both of start_RA and end_RA " "are set. Ignoring both."
                )
            # (A duplicate, unguarded `filter_acqs` on the instrument was
            # removed here: it filtered on name == None when no instrument
            # was configured.)

            if self.exclude_daytime:
                f.exclude_daytime()

            if self.exclude_sun:
                f.exclude_sun(
                    time_delta=self.exclude_sun_time_delta,
                    time_delta_rise_set=self.exclude_sun_time_delta_rise_set,
                )

            if self.include_transits:
                self._filter_transits(
                    f,
                    self.include_transits,
                    self.include_transits_time_delta,
                    include=True,
                )

            if self.exclude_transits:
                self._filter_transits(
                    f,
                    self.exclude_transits,
                    self.exclude_transits_time_delta,
                    include=False,
                )

            if self.source_26m:
                f.include_26m_obs(self.source_26m)

            if len(self.exclude_data_flag_types) > 0:
                f.exclude_data_flag_type(self.exclude_data_flag_types)

            results = f.get_results()
            if not self.return_intervals:
                files = [fname for result in results for fname in result[0]]
                files.sort()
            else:
                files = results
                # Sort intervals by the start of their time range.
                files.sort(key=lambda x: x[1][0])

        files = mpiutil.world.bcast(files, root=0)

        # Make sure all nodes have container before return
        mpiutil.world.Barrier()

        return files
class QueryRun(task.MPILoggedTask):
"""Find the files belonging to a specific `run`.
This routine will query the database for the global flag corresponding to
the given run, and will use the start and end times (as well as the
instrument) to return a list of the contained files.
Attributes
----------
run_name : str
Name of the `run` defined in the database.
node_spoof : str, optional
Node spoof argument. See documentation of :class:`ch_util.finder.Finder`.
"""
run_name = config.Property(proptype=str)
node_spoof = config.Property(proptype=dict, default=_DEFAULT_NODE_SPOOF)
def setup(self):
"""Fetch the files in the specified run.
Returns
-------
files : list
List of files to load
"""
from ch_util import layout
from chimedb import data_index as di
files = None
# Query the database on rank=0 only, and broadcast to everywhere else
if mpiutil.rank0:
layout.connect_database()
cat_run = (
layout.global_flag_category.select()
.where(layout.global_flag_category.name == "run")
.get()
)
# Find run in database
run_query = layout.global_flag.select().where(
layout.global_flag.category == cat_run,
layout.global_flag.name == self.run_name,
)
# Make sure we only have flags with active events
run_query = (
run_query.join(layout.graph_obj)
.join(layout.event)
.where(layout.event.active)
)
if run_query.count() == 0:
raise RuntimeError("Run %s not found in database" % self.run_name)
elif run_query.count() > 1:
raise RuntimeError(
"Multiple global flags found in database for run %s" % self.run_name
)
run = run_query.get()
# Fetch run start and end time
run_event = run.event().get()
start, end = run_event.start.time, run_event.end.time
# Fetch the instrument
if run.inst is None:
raise RuntimeError("Instrument is not specified in database.")
inst_obj = run.inst
# Create a finder object limited to the relevant time
fi = finder.Finder(node_spoof=self.node_spoof)
fi.only_corr()
# Set the time range that encapsulates all the intervals
fi.set_time_range(start, end)
# Add in all the time ranges
# for ti in timerange:
# fi.include_time_interval(ti['start'], ti['end'])
# Only include the required instrument
fi.filter_acqs(di.ArchiveAcq.inst == | |
the words 'freedom' and 'immigration' being said in a speech, or
P(F,I).
The first step is multiplying the probabilities of <NAME> giving a speech with her individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_j_text.
The second step is multiplying the probabilities of <NAME> giving a speech with his individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_g_text.
The third step is to add both of these probabilities and you will get P(F,I).
'''
# In[24]:
'''
Solution: Step 1
'''
# P(J): prior probability that Jill Stein gives a speech
p_j = 0.5
# P(F|J): probability Jill Stein says 'freedom'
p_j_f = 0.1
# P(I|J): probability Jill Stein says 'immigration'
p_j_i = 0.1
# Numerator of Bayes' rule for Jill: P(J) * P(F|J) * P(I|J)
p_j_text = p_j * p_j_f * p_j_i
print(p_j_text)
# In[25]:
'''
Solution: Step 2
'''
# P(G): prior probability that Gary Johnson gives a speech
p_g = 0.5
# P(F|G): probability Gary Johnson says 'freedom'
p_g_f = 0.7
# P(I|G): probability Gary Johnson says 'immigration'
p_g_i = 0.2
# Numerator of Bayes' rule for Gary: P(G) * P(F|G) * P(I|G)
p_g_text = p_g * p_g_f * p_g_i
print(p_g_text)
# In[26]:
'''
Solution: Step 3: Compute P(F,I) and store in p_f_i
'''
# Total probability of the evidence: P(F,I) = P(J)P(F|J)P(I|J) + P(G)P(F|G)P(I|G)
p_f_i = p_j_text + p_g_text
print('Probability of words freedom and immigration being said are: ', format(p_f_i))
# Now we can compute the probability of `P(J|F,I)`, the probability of Jill Stein saying the words 'freedom' and 'immigration' and `P(G|F,I)`, the probability of <NAME> saying the words 'freedom' and 'immigration'.
# In[27]:
'''
Instructions:
Compute P(J|F,I) using the formula P(J|F,I) = (P(J) * P(F|J) * P(I|J)) / P(F,I) and store it in a variable p_j_fi
'''
# In[28]:
'''
Solution
'''
# Posterior for Jill: P(J|F,I) = P(J)P(F|J)P(I|J) / P(F,I)
p_j_fi = p_j_text / p_f_i
print('The probability of Jill Stein saying the words Freedom and Immigration: ', format(p_j_fi))
# In[29]:
'''
Instructions:
Compute P(G|F,I) using the formula P(G|F,I) = (P(G) * P(F|G) * P(I|G)) / P(F,I) and store it in a variable p_g_fi
'''
# In[30]:
'''
Solution
'''
# Posterior for Gary: P(G|F,I) = P(G)P(F|G)P(I|G) / P(F,I)
p_g_fi = p_g_text / p_f_i
print('The probability of <NAME> saying the words Freedom and Immigration: ', format(p_g_fi))
# And as we can see, just like in the Bayes' theorem case, the sum of our posteriors is equal to 1.
#
# Congratulations! You have implemented the Naive Bayes' theorem from scratch. Our analysis shows that there is only a 6.6% chance that <NAME> the Green Party uses the words 'freedom' and 'immigration' in her speech as compared with the 93.3% chance for <NAME> of the Libertarian party.
# For another example of Naive Bayes, let's consider searching for images using the term 'Sacramento Kings' in a search engine. In order for us to get the results pertaining to the Sacramento Kings NBA basketball team, the search engine needs to be able to associate the two words together and not treat them individually. If the search engine only searched for the words individually, we would get results of images tagged with 'Sacramento,' like pictures of city landscapes, and images of 'Kings,' which might be pictures of crowns or kings from history. But associating the two terms together would produce images of the basketball team. In the first approach we would treat the words as independent entities, so it would be considered 'naive.' We don't usually want this approach from a search engine, but it can be extremely useful in other cases.
#
#
# Applying this to our problem of classifying messages as spam, the Naive Bayes algorithm *looks at each word individually and not as associated entities* with any kind of link between them. In the case of spam detectors, this usually works, as there are certain red flag words in an email which are highly reliable in classifying it as spam. For example, emails with words like 'viagra' are usually classified as spam.
# ### Step 5: Naive Bayes implementation using scikit-learn ###
#
# Now let's return to our spam classification context. Thankfully, sklearn has several Naive Bayes implementations that we can use, so we do not have to do the math from scratch. We will be using sklearn's `sklearn.naive_bayes` method to make predictions on our SMS messages dataset.
#
# Specifically, we will be using the multinomial Naive Bayes algorithm. This particular classifier is suitable for classification with discrete features (such as in our case, word counts for text classification). It takes in integer word counts as its input. On the other hand, Gaussian Naive Bayes is better suited for continuous data as it assumes that the input data has a Gaussian (normal) distribution.
# In[31]:
'''
Instructions:
We have loaded the training data into the variable 'training_data' and the testing data into the
variable 'testing_data'.
Import the MultinomialNB classifier and fit the training data into the classifier using fit(). Name your classifier
'naive_bayes'. You will be training the classifier using 'training_data' and 'y_train' from our split earlier.
'''
# In[32]:
'''
Solution
'''
from sklearn.naive_bayes import MultinomialNB
# Multinomial NB works on the integer word counts produced by CountVectorizer.
naive_bayes = MultinomialNB()
# Fit on the vectorized training messages and their spam/ham labels.
naive_bayes.fit(training_data, y_train)
# In[33]:
'''
Instructions:
Now that our algorithm has been trained using the training data set we can now make some predictions on the test data
stored in 'testing_data' using predict(). Save your predictions into the 'predictions' variable.
'''
# In[34]:
'''
Solution
'''
# Predict spam/ham labels for the held-out test messages.
predictions = naive_bayes.predict(testing_data)
print(predictions)
# Now that predictions have been made on our test set, we need to check the accuracy of our predictions.
# ### Step 6: Evaluating our model ###
#
# Now that we have made predictions on our test set, our next goal is to evaluate how well our model is doing. There are various mechanisms for doing so, so first let's review them.
#
# **Accuracy** measures how often the classifier makes the correct prediction. It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).
#
# **Precision** tells us what proportion of messages we classified as spam, actually were spam.
# It is a ratio of true positives (words classified as spam, and which actually are spam) to all positives (all words classified as spam, regardless of whether that was the correct classification). In other words, precision is the ratio of
#
# `[True Positives/(True Positives + False Positives)]`
#
# **Recall (sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam.
# It is a ratio of true positives (words classified as spam, and which actually are spam) to all the words that were actually spam. In other words, recall is the ratio of
#
# `[True Positives/(True Positives + False Negatives)]`
#
# For classification problems that are skewed in their classification distributions like in our case - for example if we had 100 text messages and only 2 were spam and the other 98 weren't - accuracy by itself is not a very good metric. We could classify 90 messages as not spam (including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam (all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the **F1 score**, which is the weighted average of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score.
# We will be using all 4 of these metrics to make sure our model does well. For all 4 metrics whose values can range from 0 to 1, having a score as close to 1 as possible is a good indicator of how well our model is doing.
# In[35]:
'''
Instructions:
Compute the accuracy, precision, recall and F1 scores of your model using your test data 'y_test' and the predictions
you made earlier stored in the 'predictions' variable.
'''
# In[36]:
'''
Solution
'''
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Compare the held-out labels with the classifier's predictions on each metric.
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
# ### Step 7: Conclusion ###
#
# One of the major advantages that Naive Bayes has over other classification algorithms is its ability to handle an extremely large number of features. In our case, each word is treated as a feature and there are thousands of different words. Also, it performs well even with the presence of irrelevant features and is relatively unaffected by them. The other major advantage it has is its relative simplicity. Naive Bayes' works | |
times = [self.efield_times[i] + arrival_times[i] for i in range(len(arrival_times))]
efield_permutations = self.relevant_permutations(times)
diagram_instructions = []
for perm in efield_permutations:
diagram_instructions += self.instructions_from_permutation(perm)
self.current_instructions = diagram_instructions
t1 = time.time()
try:
instructions = diagram_instructions[0]
signal = self.execute_diagram(instructions)
for instructions in diagram_instructions[1:]:
signal += self.execute_diagram(instructions)
except IndexError:
signal = 0
t2 = time.time()
self.automation_time += t1-t0
self.diagram_to_signal_time += t2-t1
return signal
def calculate_diagrams(self,diagram_instructions,arrival_times):
    """Execute a fixed list of diagram instructions at the given pulse arrival times.

    Cached density matrices are invalidated for every pulse whose arrival
    time differs from the previous call, then all diagrams are executed
    and their signals summed.
    """
    try:
        previous_times = self.pulse_times
        for pulse_index in range(len(previous_times)):
            if previous_times[pulse_index] != arrival_times[pulse_index]:
                self.remove_rhos_by_pulse_number(pulse_index)
    except AttributeError:
        # First call: no previous pulse times stored yet
        pass
    self.pulse_times = arrival_times
    self.current_instructions = diagram_instructions
    signal = self.execute_diagram(diagram_instructions[0])
    for instruction_set in diagram_instructions[1:]:
        signal += self.execute_diagram(instruction_set)
    return signal
def polarization_detection_rho_to_signal(self,rho):
    """Convert a density matrix into a heterodyne-detected signal.

    The ket-side dipole expectation is evaluated with respect to the final
    pulse and mixed with the local oscillator (the last field).
    """
    polarization = self.dipole_expectation(rho, pulse_number=-1, ket_flag=True)
    return self.polarization_to_signal(polarization, local_oscillator_number=-1)
def integrated_polarization_detection_rho_to_signal(self,rho):
    """Convert a density matrix into a time-integrated heterodyne signal."""
    integrated_polarization = self.integrated_dipole_expectation(rho, ket_flag=True)
    return self.integrated_polarization_to_signal(integrated_polarization,
                                                  local_oscillator_number=-1)
# def fluorescence_detection_rho_to_signal(self,rho):
# L_size = self.eigenvalues[0].size
# H_size = int(np.sqrt(L_size))
# # reshape rho into a normal density matrix representation
# rho = rho.reshape((H_size,H_size))
# fluorescence_yield = np.array([0,1,1,self.f_yield])
# signal = np.dot(np.diagonal(rho),fluorescence_yield)
# return signal
def set_efields(self,times_list,efields_list,centers_list,phase_discrimination,*,reset_rhos = True,
                plot_fields = False):
    """Store the electric fields used to drive the calculation.

    Args:
        times_list (list): time grid (np.ndarray) for each pulse
        efields_list (list): complex field envelope for each pulse
        centers_list (list): center frequency of each pulse
        phase_discrimination: phase-matching / phase-cycling condition
        reset_rhos (bool): if True, discard all cached density matrices
        plot_fields (bool): if True, plot each field for visual inspection
    """
    self.efield_times = times_list
    self.efields = efields_list
    self.centers = centers_list
    self.set_phase_discrimination(phase_discrimination)
    self.dts = []
    self.efield_frequencies = []
    if reset_rhos:
        self.rhos = dict()
    for t in times_list:
        if t.size == 1:
            # Single time point corresponds to the impulsive (delta-function) limit
            dt = 1
            w = np.array([0])
        else:
            dt = t[1] - t[0]
            w = fftshift(fftfreq(t.size,d=dt))*2*np.pi
        self.dts.append(dt)
        self.efield_frequencies.append(w)
    self.dt = self.dts[0]
    if self.detection_type == 'polarization':
        # Keep an unmodified copy of the local oscillator so that
        # set_local_oscillator_phase can re-phase it repeatedly.
        # Fix: the bare 'except:' hid all errors; only the missing
        # .copy() method (non-ndarray input) should trigger the fallback.
        try:
            self.local_oscillator = self.efields[-1].copy()
        except AttributeError:
            self.local_oscillator = copy.deepcopy(self.efields[-1])
    for field in self.efields:
        if len(field) == 1:
            # M = 1 is the impulsive limit; no resolution check needed
            pass
        else:
            self.check_efield_resolution(field,plot_fields = plot_fields)
def check_efield_resolution(self,efield,*,plot_fields = False):
    """Warn if the field does not decay to <1% of its peak in time or frequency.

    Args:
        efield (np.ndarray): complex field envelope sampled with spacing self.dt
        plot_fields (bool): if True, plot the field in both domains
    """
    efield_tail = np.max(np.abs([efield[0],efield[-1]]))
    if efield_tail > np.max(np.abs(efield))/100:
        warnings.warn('Consider using larger num_conv_points, pump does not decay to less than 1% of maximum value in time domain')
    efield_fft = fftshift(fft(ifftshift(efield)))*self.dt
    efield_fft_tail = np.max(np.abs([efield_fft[0],efield_fft[-1]]))
    if efield_fft_tail > np.max(np.abs(efield_fft))/100:
        warnings.warn('''Consider using smaller value of dt, pump does not decay to less than 1% of maximum value in frequency domain''')
    if plot_fields:
        fig, axes = plt.subplots(1,2)
        l1,l2, = axes[0].plot(self.efield_t,np.real(efield),self.efield_t,np.imag(efield))
        plt.legend([l1,l2],['Real','Imag'])
        axes[1].plot(self.efield_w,np.real(efield_fft),self.efield_w,np.imag(efield_fft))
        axes[0].set_ylabel('Electric field Amp')
        # Fix: raw strings -- '\o' is an invalid escape sequence in a
        # normal string literal (SyntaxWarning on modern Python)
        axes[0].set_xlabel(r'Time ($\omega_0^{-1})$')
        axes[1].set_xlabel(r'Frequency ($\omega_0$)')
        fig.suptitle('Check that efield is well-resolved in time and frequency')
        plt.show()
def set_local_oscillator_phase(self,phase):
    """Re-phase the stored local oscillator and install it as the last field."""
    phase_factor = np.exp(1j*phase)
    self.efields[-1] = phase_factor * self.local_oscillator
def get_closest_index_and_value(self,value,array):
    """Given an array and a desired value, find the closest actual value
    stored in the array.

    Args:
        value: target value to search for
        array (np.ndarray): 1d array to search

    Returns:
        tuple: (index of nearest element, nearest element)
    """
    distances = np.abs(array - value)
    closest_index = np.argmin(distances)
    return closest_index, array[closest_index]
def load_L(self):
    """Load the Liouvillian from disk.

    Expects a file 'L.npz' in self.base_path: either a numpy archive keyed
    by manifold (dense or object arrays), or a single scipy sparse matrix
    saved with save_npz, which is stored under the key 'all_manifolds'.
    Dense matrices are converted to CSR format when sufficiently sparse
    (see check_sparsity). Sets self.L (dict) and self.manifolds (list).
    """
    L_save_name = os.path.join(self.base_path,'L.npz')
    try:
        with np.load(L_save_name,allow_pickle=True) as L_archive:
            self.L = dict()
            for key in L_archive.keys():
                L = L_archive[key]
                if L.dtype == np.dtype('O'):
                    # Object array: unwrap the stored payload (e.g. a sparse matrix)
                    self.L[key] = L[()]
                else:
                    if self.check_sparsity(L):
                        self.L[key] = csr_matrix(L)
                    else:
                        self.L[key] = L
    # Fix: narrowed from a bare 'except:' which also swallowed
    # KeyboardInterrupt/SystemExit
    except Exception:
        # Not a multi-manifold archive: fall back to a single sparse matrix
        self.L = {'all_manifolds':load_npz(L_save_name)}
    self.manifolds = list(self.L.keys())
def check_sparsity(self,mat):
    """Return True if *mat* is sparse enough to benefit from CSR storage.

    The fill fraction (nonzeros / total entries) is compared against
    self.sparsity_threshold.
    """
    csr_mat = csr_matrix(mat)
    sparsity = csr_mat.nnz / (csr_mat.shape[0]*csr_mat.shape[1])
    # Idiom fix: return the comparison directly instead of if/else True/False
    return sparsity < self.sparsity_threshold
def dL(self,t,rho):
    """Right-hand side of the equation of motion: apply L to rho.

    Uses the full Liouvillian when available, otherwise the block that
    matches rho's manifold key.
    """
    if 'all_manifolds' in self.L:
        liouvillian = self.L['all_manifolds']
    else:
        liouvillian = self.L[rho.manifold_key]
    return liouvillian.dot(rho)
def get_dL_manual(self,manifold_key):
    """Return a closure rhs(t, rho) = L @ rho for a fixed manifold key."""
    if 'all_manifolds' in self.L:
        liouvillian = self.L['all_manifolds']
    else:
        liouvillian = self.L[manifold_key]
    def L_fun(t,rho):
        return liouvillian.dot(rho)
    return L_fun
def one_time_step_function(self,rho0,t0,tf,*,manifold_key = None):
    """Propagate rho0 from t0 to tf with an adaptive RK45 integrator.

    Args:
        rho0 (np.ndarray): initial (flattened) density matrix
        t0 (float): initial time
        tf (float): final time
        manifold_key (str): if given, propagate with the Liouvillian of that
            manifold; otherwise use self.dL, which infers the manifold from
            rho itself

    Returns:
        np.ndarray: the propagated density matrix at time tf
    """
    # Fixes: 'is None' instead of '== None'; removed the unused num_steps
    # counter; single RK45 construction instead of two duplicated calls.
    if manifold_key is None:
        rhs = self.dL
    else:
        rhs = self.get_dL_manual(manifold_key)
    rk45 = RK45(rhs,t0,rho0,tf,atol=self.atol,rtol=self.rtol)
    # Step the integrator until it reaches the final time tf
    while rk45.t < tf:
        rk45.step()
    return rk45.y
def get_bottom_eigenvector(self):
    """Return the stationary state of the Liouvillian as a flattened,
    trace-normalized density matrix.

    Uses the full Liouvillian if available, otherwise the '00' block.
    Raises if the smallest-magnitude eigenvalue is not 0, i.e. if L does
    not have a single stationary state.
    """
    try:
        L = self.L['all_manifolds']
    except KeyError:
        L = self.L['00']
    if L.shape == (1,1):
        # Trivial 1x1 case: eigs cannot handle it, read the value directly
        e = L[0,0]
        ev = np.array([[1]])
    else:
        # Smallest-magnitude eigenvalue/eigenvector of the (sparse) Liouvillian
        e, ev = eigs(L,k=1,which='SM',maxiter=10000)
    if e.size == 1 and np.allclose(e,0):
        pass
    else:
        raise Exception('Smallest magnitude eigenvalue of L is {}. L must have a single stationary state for this code to work'.format(e))
    v = ev[:,0]
    H_size = int(np.sqrt(v.size))
    # Normalize so that the corresponding density matrix has trace 1
    rho = v.reshape((H_size,H_size))
    trace = rho.trace()
    v = v/trace # Need to start with a trace 1 object
    return v
def set_rho0_auto(self):
    """Initialize self.rho0, preferring a saved rho0.npy file over the
    stationary state of the Liouvillian."""
    try:
        rho0_vec = np.load(os.path.join(self.base_path,'rho0.npy'))
    except FileNotFoundError:
        rho0_vec = self.get_bottom_eigenvector()
    # Constant-in-time initial condition defined on a trivial time grid
    t = np.array([-np.inf,0,np.inf])
    rho0_arr = rho0_vec[:,np.newaxis] * np.ones((rho0_vec.size,t.size))
    pulse_number = None
    manifold_key = '00'
    self.rho0 = RK_rho_container(t,rho0_arr,pulse_number,manifold_key,
                                 interp_kind = 'zero',optical_gap = self.optical_gap)
def set_rho_shapes(self):
    """Record the (ket, bra) Hilbert-space shape of rho for each manifold."""
    self.rho_shapes = dict()
    if 'all_manifolds' in self.manifolds:
        L_size = self.L['all_manifolds'].size
        H_size = int(np.sqrt(L_size))
        self.rho_shapes['all_manifolds'] = (H_size,H_size)
    else:
        # Hilbert-space dimension of each single manifold, inferred from the
        # diagonal blocks (ket manifold == bra manifold)
        H_sizes = {key[0]: int(np.sqrt(self.L[key].shape[0]))
                   for key in self.manifolds if key[0] == key[1]}
        for key in self.manifolds:
            ket_key, bra_key = key
            self.rho_shapes[key] = (H_sizes[ket_key], H_sizes[bra_key])
def load_mu(self):
    """Load the precalculated dipole overlaps. The dipole operator must
    be stored as a .npz file, and must contain at least one array, each with three
    indices: (new manifold index, old manifold eigenfunction,
    cartesian coordinate).

    Three file names are tried in order of preference: 'mu_site_basis.npz',
    'mu_original_L_basis.npz', and finally 'mu.npz'. After loading, each
    operator is split into its three cartesian components; if every operator
    is sparse enough (see check_sparsity), they are stored as CSR matrices
    instead of dense arrays."""
    # Prefer the site-basis file, then the original-L-basis file, then 'mu.npz'
    try:
        file_name = os.path.join(self.base_path,'mu_site_basis.npz')
        with np.load(file_name) as mu_archive:
            self.mu = {key:mu_archive[key] for key in mu_archive.keys()}
    except FileNotFoundError:
        try:
            file_name = os.path.join(self.base_path,'mu_original_L_basis.npz')
            with np.load(file_name) as mu_archive:
                self.mu = {key:mu_archive[key] for key in mu_archive.keys()}
        except FileNotFoundError:
            file_name = os.path.join(self.base_path,'mu.npz')
            with np.load(file_name) as mu_archive:
                self.mu = {key:mu_archive[key] for key in mu_archive.keys()}
    # Use sparse storage only if every dipole operator qualifies as sparse
    sparse_flags = []
    for key in self.mu.keys():
        # Dipole strength summed over cartesian components (2d per transition)
        mu_2D = np.sum(np.abs(self.mu[key])**2,axis=-1)
        sparse_flags.append(self.check_sparsity(mu_2D))
    sparse_flags = np.array(sparse_flags)
    if np.allclose(sparse_flags,True):
        self.sparse_mu_flag = True
    else:
        self.sparse_mu_flag = False
    # Split each operator into its x, y, z components
    for key in self.mu.keys():
        mu_x = self.mu[key][...,0]
        mu_y = self.mu[key][...,1]
        mu_z = self.mu[key][...,2]
        if self.sparse_mu_flag:
            self.mu[key] = [csr_matrix(mu_x),csr_matrix(mu_y),csr_matrix(mu_z)]
        else:
            self.mu[key] = [mu_x,mu_y,mu_z]
    print('RKE_sparse_mu_flag',self.sparse_mu_flag)
### Setting the electric field to be used
def set_polarization_sequence(self,polarization_list,*,reset_rhos=True):
    """Set the cartesian polarization vector used for each pulse.

    Args:
        polarization_list (list): list of strings, each 'x', 'y' or 'z'
        reset_rhos (bool): if True, discard all cached density matrices

    Returns:
        None: sets the attribute polarization_sequence
    """
    unit_vectors = {'x':np.array([1,0,0]),
                    'y':np.array([0,1,0]),
                    'z':np.array([0,0,1])}
    self.polarization_sequence = [unit_vectors[pol] for pol in polarization_list]
    if reset_rhos:
        self.rhos = dict()
### Tools for recursively calculating perturbed density matrices using TDPT
def dipole_matrix(self,pulse_number,key,ket_flag=True,up_flag=True):
    """Return the dipole operator projected onto the polarization of the
    requested pulse.

    Args:
        pulse_number (int): index of the optical pulse (0,1,2,...)
        key (str): manifold-transition key into self.mu; if absent, a
            generic 'ket'/'bra' + '_up'/'_down' key is used instead
        ket_flag (bool): True for a ket-side interaction, False for bra
        up_flag (bool): True for excitation, False for de-excitation
    """
    start_time = time.time()
    pol = self.polarization_sequence[pulse_number]
    x = np.array([1,0,0])
    y = np.array([0,1,0])
    z = np.array([0,0,1])
    try:
        mu = self.mu[key]
    except KeyError:
        # Fall back to the generic dipole operators
        side = 'ket' if ket_flag else 'bra'
        direction = '_up' if up_flag else '_down'
        mu = self.mu[side + direction]
    if np.all(pol == x):
        overlap_matrix = mu[0]
    elif np.all(pol == y):
        overlap_matrix = mu[1]
    elif np.all(pol == z):
        overlap_matrix = mu[2]
    else:
        # General polarization: project onto the cartesian components
        overlap_matrix = mu[0]*pol[0] + mu[1]*pol[1] + mu[2]*pol[2]
    self.dipole_time += time.time() - start_time
    return overlap_matrix
def manifold_key_to_array(self,key):
    """Convert a two-character manifold key into an integer array.

    Key must be a string of exactly 2 integers, the first describing
    the ket manifold, the second the bra manifold. If the density
    matrix is represented in the full space, rather than being divided
    into manifolds, the first integer represents the total number of
    excitations to the ket side, and the second integer represents
    the sum of all excitations to the bra side.

    Raises:
        Exception: if key is not exactly two characters long
    """
    if len(key) != 2:
        # Fix: corrected 'intgers' typo in the error message
        raise Exception('manifold key must be a string of exactly two integers')
    return np.array([int(char) for char in key],dtype=int)
def manifold_array_to_key(self,manifold):
    """Inverse of self.manifold_key_to_array.

    Raises:
        Exception: if manifold is not a size-2 integer array
    """
    if manifold.size != 2 or manifold.dtype != int:
        # Fix: grammar in the error message ('2 integer' -> '2 integers')
        raise Exception('manifold array must contain exactly 2 integers')
    return str(manifold[0]) + str(manifold[1])
def next_order(self,rho_in,*,ket_flag=True,up_flag=True,pulse_number = 0):
"""This function connects psi_p to psi+pj^(*) using the Euler Method.
Args:
rho_in (rho_container): input density matrix
pulse_number (int): index of optical pulse (0,1,2,...)
Return:
rho_dict (rho_container): next-order density matrix
"""
pulse_time = self.pulse_times[pulse_number]
t = self.efield_times[pulse_number] + pulse_time
old_manifold_key = rho_in.manifold_key
if up_flag:
change = 1
else:
change = -1
if ket_flag:
manifold_change = np.array([change,0],dtype=int)
else:
manifold_change = np.array([0,change],dtype=int)
old_manifold = self.manifold_key_to_array(old_manifold_key)
new_manifold = old_manifold + manifold_change
new_manifold_key = self.manifold_array_to_key(new_manifold)
mu_key = old_manifold_key + '_to_' + new_manifold_key
if ket_flag == up_flag:
# Rotating term excites the ket and de-excites the bra
conjugate_flag = False
else:
# Counter-rotating term
conjugate_flag = True
if conjugate_flag:
center = | |
rec["rev"]
# check record was updated
response = client.get("/index/" + rec_2["did"])
assert response.status_code == 200
record = response.json
assert record["metadata"] == dataNew["metadata"]
assert record["acl"] == dataNew["acl"]
assert record["authz"] == dataNew["authz"]
# create record
data = get_doc()
data["did"] = "cdis:3d313755-cbb4-4b08-899d-7bbac1f6e67d"
res = client.post("/index/", json=data, headers=user)
assert res.status_code == 200
rec = res.json
assert rec["did"]
assert rec["rev"]
# update record
dataNew = {
"urls": ["s3://endpointurl/bucket/key"],
"file_name": "test",
"version": "ver123",
}
res_2 = client.put(
"/index/{}?rev={}".format(rec["did"], rec["rev"]), json=dataNew, headers=user
)
assert res_2.status_code == 200
rec_2 = res_2.json
assert rec_2["rev"] != rec["rev"]
def test_index_update_with_authz_check(client, user, use_mock_authz):
    """Updating a record's authz requires 'update' on both old and new resources."""
    old_authz = "/programs/A"
    new_authz = "/programs/B"
    # create a record protected by old_authz
    doc = get_doc()
    doc["authz"] = [old_authz]
    response = client.post("/index/", json=doc, headers=user)
    assert response.status_code == 200, response.json
    record = response.json
    assert record["did"]
    assert record["rev"]
    rev = record["rev"]
    payload = {"authz": [new_authz]}
    # with 'update' on the new resource only, the update must be rejected
    use_mock_authz([("update", new_authz)])
    response = client.put("/index/{}?rev={}".format(record["did"], rev), json=payload)
    assert response.status_code == 403, response.json
    # with 'update' on both resources, the update must succeed
    use_mock_authz([("update", new_authz), ("update", old_authz)])
    response = client.put("/index/{}?rev={}".format(record["did"], rev), json=payload)
    assert response.status_code == 200, response.json
    record = response.json
    assert record["rev"] != rev
    # the stored record now carries the new authz
    response = client.get("/index/" + record["did"])
    assert response.status_code == 200, response.json
    assert response.json["authz"] == [new_authz]
def test_index_update_duplicate_acl_authz(client, user):
    """Duplicate entries in acl/authz are de-duplicated on update."""
    doc = get_doc()
    create_res = client.post("/index/", json=doc, headers=user)
    assert create_res.status_code == 200
    created = create_res.json
    assert created["did"]
    assert created["rev"]
    assert client.get("/index/" + created["did"]).json["metadata"] == doc["metadata"]
    # build an update payload containing repeated acl/authz values
    update_doc = get_doc()
    del update_doc["hashes"]
    del update_doc["size"]
    del update_doc["form"]
    update_doc["metadata"] = {"test": "abcd"}
    update_doc["version"] = "ver123"
    update_doc["acl"] = ["c", "d", "c"]
    update_doc["authz"] = ["x", "y", "x"]
    update_res = client.put(
        "/index/{}?rev={}".format(created["did"], created["rev"]),
        json=update_doc,
        headers=user,
    )
    assert update_res.status_code == 200
    updated = update_res.json
    assert updated["rev"] != created["rev"]
    # duplicates must have been removed from both lists
    fetched = client.get("/index/" + created["did"])
    assert fetched.status_code == 200
    stored = fetched.json
    assert stored["metadata"] == update_doc["metadata"]
    assert stored["acl"] == ["c", "d"]
    assert stored["authz"] == ["x", "y"]
def test_update_uploader_field(client, user):
    """The uploader field can be changed and also cleared (set to None)."""
    doc = get_doc()
    doc["uploader"] = "uploader_123"
    response = client.post("/index/", json=doc, headers=user)
    assert response.status_code == 200
    record = response.json
    assert record["did"]
    assert record["rev"]
    response = client.get("/index/" + record["did"])
    assert response.status_code == 200
    record = response.json
    assert record["uploader"] == "uploader_123"
    # change the uploader to a new value
    response = client.put(
        "/index/{}?rev={}".format(record["did"], record["rev"]),
        json={"uploader": "new_uploader"},
        headers=user,
    )
    assert response.status_code == 200
    response = client.get("/index/" + record["did"])
    assert response.status_code == 200
    record = response.json
    assert record["uploader"] == "new_uploader"
    # clear the uploader entirely
    response = client.put(
        "/index/{}?rev={}".format(record["did"], record["rev"]),
        json={"uploader": None},
        headers=user,
    )
    assert response.status_code == 200
    response = client.get("/index/" + record["did"])
    assert response.status_code == 200
    record = response.json
    assert record["uploader"] is None
def test_index_delete(client, user):
    """A deleted record can no longer be fetched."""
    doc = get_doc(has_metadata=False, has_baseid=False)
    response = client.post("/index/", json=doc, headers=user)
    assert response.status_code == 200
    record = response.json
    assert record["did"]
    assert record["rev"]
    response = client.get("/index/" + record["did"])
    assert response.status_code == 200
    record = response.json
    assert record["did"]
    # delete the record
    response = client.delete(
        "/index/{}?rev={}".format(record["did"], record["rev"]), json=doc, headers=user
    )
    assert response.status_code == 200
    # fetching it again must now return 404
    response = client.get("/index/{}?rev={}".format(record["did"], record["rev"]))
    assert response.status_code == 404
def test_create_index_version(client, user):
    """Posting to /index/<did> creates a new version sharing the baseid."""
    doc = get_doc(has_metadata=False, has_baseid=False)
    response = client.post("/index/", json=doc, headers=user)
    assert response.status_code == 200
    first_version = response.json
    assert first_version["did"]
    assert first_version["rev"]
    assert first_version["baseid"]
    new_version_doc = {
        "did": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
        "form": "object",
        "size": 244,
        "urls": ["s3://endpointurl/bucket2/key"],
        "hashes": {"md5": "8b9942cf415384b27cadf1f4d2d981f5"},
        "acl": ["a"],
    }
    response = client.post("/index/" + first_version["did"], json=new_version_doc, headers=user)
    assert response.status_code == 200
    second_version = response.json
    # the new version keeps the baseid and takes the requested did
    assert second_version["baseid"] == first_version["baseid"]
    assert second_version["did"] == new_version_doc["did"]
def test_get_latest_version(client, user):
    """/latest returns the newest version; has_version filters unversioned ones."""
    versioned_doc = get_doc(has_metadata=False, has_baseid=False, has_version=True)
    response = client.post("/index/", json=versioned_doc, headers=user)
    assert response.status_code == 200
    versioned_rec = response.json
    assert versioned_rec["did"]
    unversioned_doc = get_doc(has_metadata=False, has_baseid=False, has_version=False)
    response = client.post("/index/" + versioned_rec["did"], json=unversioned_doc, headers=user)
    assert response.status_code == 200
    unversioned_rec = response.json
    # /latest by did resolves to the most recently created version
    response = client.get("/index/{}/latest".format(unversioned_rec["did"]))
    assert response.status_code == 200
    assert response.json["did"] == unversioned_rec["did"]
    # /latest by baseid also resolves to the most recent version
    response = client.get("/index/{}/latest".format(versioned_rec["baseid"]))
    assert response.status_code == 200
    assert response.json["did"] == unversioned_rec["did"]
    # has_version=True skips versions without a version string
    response = client.get("/index/{}/latest?has_version=True".format(versioned_rec["baseid"]))
    assert response.status_code == 200
    assert response.json["did"] == versioned_rec["did"]
def test_get_all_versions(client, user):
    """All versions are returned by did or baseid, in creation-date order.

    The /versions endpoint must return the same payload whether it is
    queried by did or by baseid.
    """
    dids = []
    # create 1st version
    data = get_doc(has_metadata=False, has_baseid=False)
    res = client.post("/index/", json=data, headers=user)
    assert res.status_code == 200
    rec1 = res.json
    assert rec1["did"]
    dids.append(rec1["did"])
    # create 2nd version
    res = client.post("/index/" + rec1["did"], json=data, headers=user)
    assert res.status_code == 200
    rec2 = res.json
    assert rec2["did"]
    dids.append(rec2["did"])
    # make sure all versions are returned when hitting the /versions endpoint
    res = client.get("/index/{}/versions".format(rec1["did"]))
    recs1 = res.json
    assert len(recs1) == 2
    res = client.get("/index/{}/versions".format(rec1["baseid"]))
    recs2 = res.json
    assert len(recs2) == 2
    # Bug fix: this previously asserted 'recs1 == recs1', a tautology that
    # never compared the did-based and baseid-based responses.
    assert recs1 == recs2
    # make sure records are returned in creation date order
    for i, record in recs1.items():
        assert record["did"] == dids[int(i)], "record id does not match"
def test_update_all_versions(client, user):
    """PUT /index/<did>/versions updates acl/authz on every version."""
    created_dids = []
    initial_acl = ["mock_acl_A1", "mock_acl_A2"]
    updated_acl = ["mock_acl_B1", "mock_acl_B2"]
    initial_authz = ["mock_authz_A1", "mock_authz_A2"]
    updated_authz = ["mock_authz_B1", "mock_authz_B2"]
    # SETUP: create two versions of the same record
    doc = get_doc(has_metadata=False, has_baseid=False)
    doc["acl"] = initial_acl
    doc["authz"] = initial_authz
    response = client.post("/index/", json=doc, headers=user)
    assert response.status_code == 200
    first_version = response.json
    assert first_version["did"]
    created_dids.append(first_version["did"])
    response = client.post("/index/" + first_version["did"], json=doc, headers=user)
    assert response.status_code == 200
    second_version = response.json
    assert second_version["did"]
    created_dids.append(second_version["did"])
    # Update all versions at once
    payload = {"acl": updated_acl, "authz": updated_authz}
    response = client.put(
        "/index/{}/versions".format(first_version["did"]), json=payload, headers=user
    )
    assert response.status_code == 200, "Failed to update all version: {}".format(response.json)
    # The GUIDs of all updated versions are returned in creation order
    assert created_dids == [record["did"] for record in response.json]
    # Every version now carries the new acl/authz
    response = client.get("/index/{}/versions".format(first_version["did"]))
    assert response.status_code == 200, "Failed to get all versions"
    for _, version in response.json.items():
        assert version["acl"] == updated_acl
        assert version["authz"] == updated_authz
def test_update_all_versions_using_baseid(client, user):
    """All versions can be updated by referencing the record's baseid."""
    initial_acl = ["mock_acl_A1", "mock_acl_A2"]
    updated_acl = ["mock_acl_B1", "mock_acl_B2"]
    initial_authz = ["mock_authz_A1", "mock_authz_A2"]
    updated_authz = ["mock_authz_B1", "mock_authz_B2"]
    # SETUP: create two versions sharing a baseid
    doc = get_doc(has_metadata=False, has_baseid=False)
    doc["acl"] = initial_acl
    doc["authz"] = initial_authz
    response = client.post("/index/", json=doc, headers=user)
    assert response.status_code == 200
    first_version = response.json
    assert first_version["did"]
    baseid = first_version["baseid"]
    response = client.post("/index/" + first_version["did"], json=doc, headers=user)
    assert response.status_code == 200
    assert response.json["baseid"] == baseid
    # Update all versions via the baseid
    payload = {"acl": updated_acl, "authz": updated_authz}
    response = client.put(
        "/index/{}/versions".format(baseid), json=payload, headers=user
    )
    assert response.status_code == 200, "Failed to update all version: {}".format(response.json)
    # Every version now carries the new acl/authz
    response = client.get("/index/{}/versions".format(first_version["did"]))
    assert response.status_code == 200, "Failed to get all versions"
    for _, version in response.json.items():
        assert version["acl"] == updated_acl
        assert version["authz"] == updated_authz
def test_update_all_versions_guid_not_found(client, user):
    """Updating all versions of a nonexistent GUID returns 404."""
    missing_guid = "00000000-0000-0000-0000-000000000000"
    payload = {"acl": ["mock_acl"], "authz": ["mock_authz"]}
    response = client.put(
        "/index/{}/versions".format(missing_guid), json=payload, headers=user
    )
    # Expect the operation to fail with 404 -- Guid not found
    assert (
        response.status_code == 404
    ), "Expected update operation to fail with 404: {}".format(response.json)
def test_update_all_versions_fail_on_bad_metadata(client, user):
    """
    When making an update request, endpoint should return 400 (User error) if
    the metadata to update contains any fields that cannot be updated across all versions.
    Currently the only allowed fields are ('acl', 'authz').
    """
    initial_acl = ["mock_acl_A1", "mock_acl_A2"]
    updated_acl = ["mock_acl_B1", "mock_acl_B2"]
    initial_authz = ["mock_authz_A1", "mock_authz_A2"]
    updated_authz = ["mock_authz_B1", "mock_authz_B2"]
    # SETUP: create two versions sharing a baseid
    doc = get_doc(has_metadata=False, has_baseid=False)
    doc["acl"] = initial_acl
    doc["authz"] = initial_authz
    response = client.post("/index/", json=doc, headers=user)
    assert response.status_code == 200
    first_version = response.json
    assert first_version["did"]
    baseid = first_version["baseid"]
    response = client.post("/index/" + first_version["did"], json=doc, headers=user)
    assert response.status_code == 200
    assert response.json["baseid"] == baseid
    # Attempt an update that includes a disallowed field ('urls')
    payload = {"urls": ["url_A"], "acl": updated_acl, "authz": updated_authz}
    response = client.put(
        "/index/{}/versions".format(baseid), json=payload, headers=user
    )
    # Expect the operation to fail with 400
    assert (
        response.status_code == 400
    ), "Expected update operation to fail with 400: {}".format(response.json)
    # Every version must retain the original acl/authz
    response = client.get("/index/{}/versions".format(first_version["did"]))
    assert response.status_code == 200, "Failed to get all versions"
    for _, version in response.json.items():
        assert version["acl"] == initial_acl
        assert version["authz"] == initial_authz
def test_update_all_versions_fail_on_missing_permissions(client, user, use_mock_authz):
"""
If user does not have the 'update' permission on any record, request should
fail | |
"""Handling of GNSS broadcast orbits
Description:
------------
The module includes a class for handling apriori GNSS broadcast orbits.
Example:
from where import apriori
# Get broadcast ephemeris object
brdc = apriori.get('orbit', rundate=rundate, station=station, system=system, apriori_orbit='broadcast')
# Write calculated Dataset to file
brdc.dset.write()
"""
# Standard library imports
from datetime import timedelta
from typing import Dict, List, Union
# External library imports
import numpy as np
import pandas as pd
# Midgard imports
from midgard.collections import enums
from midgard.dev import plugins
from midgard.files import dependencies
from midgard.math.constant import constant
from midgard.math.unit import Unit
from midgard.parsers import rinex_nav
# Where imports
from where.data import dataset3 as dataset
from where import cleaners
from where.apriori import orbit
from where.lib import config
from where.lib import log
from where.lib import rotation
# Earth's gravitational constant per GNSS, keyed by RINEX system identifier
GM = {
    "C": constant.get("GM", source="cgcs2000"),  # BeiDou
    "E": constant.get("GM", source="gtrf"),  # Galileo
    "G": constant.get("GM", source="wgs84"),  # GPS
    "I": constant.get("GM", source="wgs84"),  # IRNSS
    "J": constant.get("GM", source="jgs"),  # QZSS
}
# Earth's rotation rate per GNSS, keyed by RINEX system identifier
OMEGA = {
    "C": constant.get("omega", source="cgcs2000"),  # BeiDou
    "E": constant.get("omega", source="gtrf"),  # Galileo
    "G": constant.get("omega", source="wgs84"),  # GPS
    "I": constant.get("omega", source="wgs84"),  # IRNSS
    "J": constant.get("omega", source="jgs"),  # QZSS
}
@plugins.register
class BroadcastOrbit(orbit.AprioriOrbit):
"""A class for representing apriori broadcast orbits
RINEX navigation files can be read and edited. In addition GNSS satellite position, velocities, satellite clock
correction and relativistic clock corrections can be calculated for given observation epochs based on broadcast
ephemeris.
Attributes:
dset (Dataset): Dataset object, which includes satellite position and velocity, satellite clock
correction and relativistic clock correction for each observation epoch
dset_edit (Dataset): Dataset object, which includes edited broadcast ephemeris navigation messages
dset_raw (Dataset): Dataset object, which includes broadcast ephemeris navigation messages read from RINEX
file
file_key (str): Key to the broadcast orbit file defined in files.conf file.
name (str): Apriori orbit name
system (tuple): GNSS system identifiers
Methods:
relativistic_clock_correction(): Determine relativistic clock correction due to orbit eccentricity
satellite_clock_correction(): Determine satellite clock correction
unhealthy_satellites(): Get unhealthy satellites based on RINEX navigation file information
_calculate(): Calculate broadcast ephemeris and satellite clock correction for given observation
epochs
_edit(): Edit RINEX navigation file data and save it in a Dataset
_read(): Read RINEX navigation file data and save it in a Dataset
"""
name = "broadcast"
def __init__(self, rundate, system, station=None, file_key=None, file_path=None, day_offset=1):
    """Set up a new BroadcastOrbit object, does not parse any data

    TODO: Remove dependency on rundate, use time to read correct files. (What to do with dataset?)

    Args:
        rundate (date):                Date of model run.
        system (tuple):                List with GNSS system string codes (G, E, R, etc.).
        station (str):                 4 character station identifier.
        file_key (str):                Key to the broadcast orbit file defined in files.conf file.
        file_path (pathlib.PosixPath): File path to broadcast orbit file.
        day_offset (int):              Day offset used to calculate the number of days to read.
    """
    super().__init__(rundate=rundate)
    self.system = system
    self.file_key = "gnss_rinex_nav_{system}" if file_key is None else file_key
    self.file_path = file_path
    self.day_offset = day_offset
    # TODO hjegei: Should it not be enough to have 'station' in _dset_raw?
    self._dset_raw.vars["station"] = station.lower()
    self._dset_raw.vars["STATION"] = self._dset_raw.vars["station"].upper()
    self._dset_edit.vars["station"] = station.lower()
    # Fix: derive the edited dataset's STATION from _dset_edit (was _dset_raw);
    # the value is identical, but each dataset should be self-consistent.
    self._dset_edit.vars["STATION"] = self._dset_edit.vars["station"].upper()
def _read(self, dset_raw):
    """Read RINEX navigation file data and save it in a Dataset

    Note that beside the given day also the navigation files from the day before and the day after are read.

    One RINEX navigation file is normally written for each GNSS. The navigation file extension depends on the
    GNSS (GPS: *.n, Galileo: *.l, GLONASS: *.g, ...). Therefore we have defined for each GNSS the navigation
    file name in the Where configuration file `files.conf`. In addition mixed navigation files exist, which
    include navigation messages of different GNSS. We use following file keys in `files.conf`:

        ========= ==================
        System     File key
        ========= ==================
        Galileo    gnss_rinex_nav_E
        GLONASS    gnss_rinex_nav_R
        GPS        gnss_rinex_nav_G
        Mixed      gnss_rinex_nav_M
        ========= ==================

    Depending on the configuration options `systems` and `use_mixed_brdc_file` following navigation files are
    read:

        ====================== ================== =======================================
        Option                  File key           What kind of navigation file is read?
        ====================== ================== =======================================
        systems = G             gnss_rinex_nav_G   Only the GPS navigation file
        systems = G E           gnss_rinex_nav_G   GPS and Galileo navigation files
                                gnss_rinex_nav_E
        use_mixed_brdc_file     gnss_rinex_nav_M   Mixed GNSS navigation file
        ====================== ================== =======================================

    Args:
        dset_raw (Dataset): Dataset representing raw data from RINEX navigation file
    """
    date_to_read = dset_raw.analysis["rundate"] - timedelta(days=self.day_offset)
    use_mixed_brdc_file = config.tech.get("use_mixed_brdc_file", default=False).bool
    # Fix: test truthiness directly instead of comparing with '== True'
    systems = {"M"} if use_mixed_brdc_file else self.system
    file_paths = list()

    # Loop over days to read: [rundate - day_offset, rundate + day_offset]
    while date_to_read <= dset_raw.analysis["rundate"] + timedelta(days=self.day_offset):
        date = date_to_read.strftime("%Y-%m-%d")
        meta = dict()
        for sys in systems:
            if self.file_path is None:
                file_path = config.files.path(
                    self.file_key.format(system=sys), file_vars=config.date_vars(date_to_read)
                )
            else:
                file_path = self.file_path
            log.debug(f"Parse broadcast orbit file {file_path}")

            # Generate temporary Dataset with orbit file data
            dset_temp = dataset.Dataset(
                rundate=date_to_read, pipeline=dset_raw.vars["pipeline"], stage="temporary"
            )
            parser = rinex_nav.get_rinex2_or_rinex3(file_path)
            dset_temp.update_from(parser.as_dataset())
            file_paths.append(str(parser.file_path))
            dependencies.add(str(parser.file_path), label=self.file_key.format(system=sys))  # Used for output writing

            # Extend Dataset dset_raw with temporary Dataset
            if dset_raw.num_obs:
                # Merge meta data information
                # TODO: Handle meta data information correctly. Meta data information based on different GNSS
                #       navigation message files has to be merged correctly together. What to do with
                #       'sat_sys' and 'leap_seconds'?
                if date in dict(dset_raw.meta).keys():
                    for key in ["iono_para", "time_sys_corr"]:
                        dset_temp.meta[key].update(dset_raw.meta[date][key])
                dset_raw.extend(dset_temp, meta_key=date)
            else:
                # Add date key to meta TODO: Better solution?
                meta.setdefault(date, dict()).update(dset_temp.meta)
                for k in dict(dset_temp["meta"]).keys():
                    del dset_temp.meta[k]
                dset_temp.meta.update(meta)
                dset_raw.update_from(dset_temp)
        date_to_read += timedelta(days=1)
    dset_raw.meta.add("file_path", file_paths, section="parser")

    if "E" in dset_raw.unique("system"):
        self._galileo_signal_health_status()
def _edit(self, dset_edit: "Dataset") -> "Dataset":
    """Edit RINEX navigation file data and save it in a Dataset

    First the navigation messages are sorted after the satellite and time of transmission. Afterwards duplicated
    navigation messages for a satellite are removed, whereby the first occurrence of the navigation message is
    kept. The satellite navigation epoch (time of clock (toc)) and the IODE navigation number is used for
    indication of a duplicated epoch. The meaning of the IODE navigation number depends on the GNSS:

        - GPS: Ephemeris issue of data (IODE)
        - Galileo: Issue of Data of the NAV batch (IODnav)
        - QZSS: Ephemeris issue of data (IODE)
        - BeiDou: Age of Data Ephemeris (AODE)
        - IRNSS: Issue of Data, Ephemeris and Clock (IODEC)

    It can be defined with the configuration option 'navigation_message_type', what kind of navigation message type
    should be used in the analysis. Other navigation message types are removed from the broadcast ephemeris. For
    example for Galileo INAV or FNAV navigation messages can be chosen.

    Args:
        dset_edit (Dataset): Dataset representing edited data from RINEX navigation file
    """
    # TODO: This does not work. -> Can not find remover ignore_duplicated_navigation_messages().
    # cleaners.removers.ignore_duplicated_navigation_messages(dset_edit) #MURKS

    # Generate Pandas frame for all navigation message entries
    nav = self.dset_raw.as_dataframe()

    # Sort so that "keep='first'" below deterministically keeps the message with the
    # earliest transmission time for each (satellite, toc) pair
    nav_filtered = nav.sort_values(by=["satellite", "time", "transmission_time"])

    # Remove duplicated navigation message entries (IODEs)
    # TODO: Maybe it should be a configuration option, how to filter duplicated epochs. Keep first and last.
    idx = nav_filtered.duplicated(
        subset=["satellite", "time", "iode", "nav_type", "transmission_time"], keep="first"
    )
    nav_duplicates = nav_filtered[["satellite", "time", "iode", "nav_type"]][idx]
    # Widen the pandas display limits only for this log statement
    with pd.option_context("display.max_rows", None, "display.max_columns", 5):
        log.info(f"Remove {len(nav_duplicates)} duplicated navigation message entries.")
        log.debug(f"List of duplicated navigation messages: \n{nav_duplicates}")
    nav_filtered = nav_filtered.drop_duplicates(
        subset=["satellite", "time", "iode", "nav_type", "transmission_time"], keep="first"
    )

    # Remove navigation message types, which are not needed
    nav_type = config.tech.get("navigation_message_type", default="").dict
    if nav_type:
        for sys, type_ in nav_type.items():
            # Reduce the configured key to its first character as system identifier
            # NOTE(review): assumes configuration keys start with the RINEX system
            # letter (e.g. "G..." for GPS) — confirm against the config format.
            sys = sys[0] if len(sys) > 0 else sys
            # Overwrite message type definition if only 'INAV' or 'FNAV' is specified
            if type_ == "INAV":
                type_ = ["INAV_E1", "INAV_E5b", "INAV_E1E5b"]
            elif type_ == "FNAV":
                type_ = ["FNAV_E5a"]
            # NOTE(review): type_ may be a list at this point; this relies on pandas
            # query semantics for 'nav_type != <list>' — verify, a 'not in'
            # expression may be the safer spelling.
            remove_nav_type = nav_filtered.query("system == @sys and nav_type != @type_")
            if not remove_nav_type.empty:
                log.info(
                    f"Remove {', '.join(set(remove_nav_type.nav_type))!r} navigation messages of GNSS {sys!r}"
                )
                # Drop the unwanted rows: concatenating them a second time makes them
                # full-row duplicates, which drop_duplicates(keep=False) removes
                nav_filtered = pd.concat([nav_filtered, remove_nav_type]).drop_duplicates(keep=False)
    if nav_filtered.empty:
        log.fatal(f"No navigation messages available for GNSS: {','.join(self.dset_raw.unique('system'))} ")

    # TODO hjegei: Possible future ...
    # dset_edit.copy_from(self.dset_raw)
    # dset_edit.reorder(nav_filtered.index.values)

    # Convert edited fields to Dataset
    nav_np = nav_filtered.values
    fields = nav_filtered.columns
    dset_edit.vars["orbit"] = self.name
    dset_edit.num_obs = nav_filtered.shape[0]
    dset_edit.meta.update(self.dset_raw.meta)
    for idx, field in enumerate(fields):
        if field.startswith("time_") or field.startswith("transmission_time_") or field.startswith("toe_"):
            continue  # Skip unnecessary fields
        elif field in ["time", "transmission_time", "toe"]:
            # Epoch-like columns become Time fields in GPS time scale
            dset_edit.add_time(field, val=nav_np[:, idx], scale="gps", fmt="datetime")
        elif field in ["nav_type", "satellite", "system"]:
            dset_edit.add_text(field, val=nav_np[:, idx])
        else:
            # Everything else is numeric; carry the unit over from the raw Dataset if defined
            unit = self.dset_raw.unit(field)[0] if self.dset_raw.unit(field) else None
            dset_edit.add_float(field, val=nav_np[:, idx].astype(float), unit=unit)
def _calculate(self, dset_out: "Dataset", dset_in: "Dataset", time: str = "time") -> None:
"""Calculate broadcast ephemeris and satellite clock correction | |
STEP and RAMP. The default value is STEP.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
directDampingByFrequency
A DirectDampingByFrequency object.
rayleighDampingByFrequency
A RayleighDampingByFrequency object.
Returns
-------
step: ModalDynamicsStep
A ModalDynamicsStep object.
"""
self.steps[name] = step = ModalDynamicsStep(name, previous, description, continueAnalysis, timePeriod, incSize,
directDamping, compositeDamping, rayleighDamping, amplitude,
maintainAttributes, directDampingByFrequency,
rayleighDampingByFrequency)
return step
def RandomResponseStep(self, name: str, previous: str, freq: RandomResponseFrequencyArray, description: str = '',
                       scale: SymbolicConstant = LOG, directDamping: DirectDamping = DirectDamping(),
                       compositeDamping: CompositeDamping = CompositeDamping(),
                       rayleighDamping: RayleighDamping = RayleighDamping(),
                       structuralDamping: StructuralDamping = StructuralDamping(),
                       directDampingByFrequency: DirectDampingByFrequency = DirectDampingByFrequency(),
                       rayleighDampingByFrequency: RayleighDampingByFrequency = RayleighDampingByFrequency(),
                       structuralDampingByFrequency: StructuralDampingByFrequency = StructuralDampingByFrequency(),
                       maintainAttributes: Boolean = False) -> RandomResponseStep:
    """Create a RandomResponseStep and register it under *name* in the step repository.

    Notes
    -----
    This function can be accessed by:

    .. code-block:: python

        mdb.models[name].RandomResponseStep

    Parameters
    ----------
    name
        Repository key of the new step.
    previous
        Name of the step after which the new step is inserted in the analysis sequence.
    freq
        A RandomResponseFrequencyArray object specifying frequencies over ranges of modes.
    description
        Optional description of the new step (default: empty string).
    scale
        Frequency scale, either LINEAR or LOG (default: LOG).
    directDamping
        A DirectDamping object.
    compositeDamping
        A CompositeDamping object.
    rayleighDamping
        A RayleighDamping object.
    structuralDamping
        A StructuralDamping object.
    directDampingByFrequency
        A DirectDampingByFrequency object.
    rayleighDampingByFrequency
        A RayleighDampingByFrequency object.
    structuralDampingByFrequency
        A StructuralDampingByFrequency object.
    maintainAttributes
        Whether attributes of an existing step with the same name are retained
        (default: False).

    Returns
    -------
    step: RandomResponseStep
        The newly created RandomResponseStep object.
    """
    # Build the step first, then publish it in the repository under its key.
    new_step = RandomResponseStep(name, previous, freq, description, scale, directDamping,
                                  compositeDamping, rayleighDamping, structuralDamping,
                                  directDampingByFrequency, rayleighDampingByFrequency,
                                  structuralDampingByFrequency, maintainAttributes)
    self.steps[name] = new_step
    return new_step
def ResponseSpectrumStep(self, name: str, previous: str, components: ResponseSpectrumComponentArray,
                         description: str = '', comp: SymbolicConstant = SINGLE_DIRECTION,
                         sum: SymbolicConstant = ABS, directDamping: DirectDamping = DirectDamping(),
                         compositeDamping: CompositeDamping = CompositeDamping(),
                         rayleighDamping: RayleighDamping = RayleighDamping(),
                         directDampingByFrequency: DirectDampingByFrequency = DirectDampingByFrequency(),
                         rayleighDampingByFrequency: RayleighDampingByFrequency = RayleighDampingByFrequency(),
                         maintainAttributes: Boolean = False) -> ResponseSpectrumStep:
    """Create a ResponseSpectrumStep and register it under *name* in the step repository.

    Notes
    -----
    This function can be accessed by:

    .. code-block:: python

        mdb.models[name].ResponseSpectrumStep

    Parameters
    ----------
    name
        Repository key of the new step.
    previous
        Name of the step after which the new step is inserted in the analysis sequence.
    components
        A ResponseSpectrumComponentArray object.
    description
        Optional description of the new step (default: empty string).
    comp
        Order and method used to sum the components; one of SINGLE_DIRECTION,
        MULTIPLE_DIRECTION_ABSOLUTE_SUM, MULTIPLE_DIRECTION_SRSS_SUM,
        MULTIPLE_DIRECTION_THIRTY_PERCENT_RULE, MULTIPLE_DIRECTION_FORTY_PERCENT_RULE
        (default: SINGLE_DIRECTION).
    sum
        Summation method; one of ABS, CQC, NRL, SRSS, TENP, DSC, GRP (default: ABS).
    directDamping
        A DirectDamping object.
    compositeDamping
        A CompositeDamping object.
    rayleighDamping
        A RayleighDamping object.
    directDampingByFrequency
        A DirectDampingByFrequency object.
    rayleighDampingByFrequency
        A RayleighDampingByFrequency object.
    maintainAttributes
        Whether attributes of an existing step with the same name are retained
        (default: False).

    Returns
    -------
    step: ResponseSpectrumStep
        The newly created ResponseSpectrumStep object.
    """
    # Build the step first, then publish it in the repository under its key.
    new_step = ResponseSpectrumStep(name, previous, components, description, comp, sum,
                                    directDamping, compositeDamping, rayleighDamping,
                                    directDampingByFrequency, rayleighDampingByFrequency,
                                    maintainAttributes)
    self.steps[name] = new_step
    return new_step
def SoilsStep(self, name: str, previous: str, description: str = '', response: SymbolicConstant = TRANSIENT,
timePeriod: float = 1, nlgeom: Boolean = OFF,
stabilizationMethod: SymbolicConstant = NONE, stabilizationMagnitude: float = None,
creep: Boolean = ON, timeIncrementationMethod: SymbolicConstant = AUTOMATIC,
initialInc: float = None, minInc: float = None, maxInc: float = None,
maxNumInc: int = 100, end: SymbolicConstant = PERIOD, utol: float = None,
cetol: float = 0, amplitude: SymbolicConstant = STEP,
extrapolation: SymbolicConstant = LINEAR, matrixSolver: SymbolicConstant = DIRECT,
matrixStorage: SymbolicConstant = SOLVER_DEFAULT, maintainAttributes: Boolean = False,
solutionTechnique: SymbolicConstant = FULL_NEWTON, reformKernel: int = 8,
convertSDI: SymbolicConstant = PROPAGATED, adaptiveDampingRatio: float = 0,
continueDampingFactors: Boolean = OFF) -> SoilsStep:
"""This method creates a SoilsStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SoilsStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description
A String specifying a description of the new step. The default value is an empty string.
response
A SymbolicConstant specifying the analysis type. Possible values are STEADY_STATE and
TRANSIENT. The default value is TRANSIENT.
timePeriod
A Float specifying the total time period. The default value is 1.0.
nlgeom
A Boolean specifying whether geometric nonlinearities should be accounted for during the
step. The default value is OFF.
stabilizationMethod
A SymbolicConstant specifying the stabilization type. Possible values are NONE,
DISSIPATED_ENERGY_FRACTION, and DAMPING_FACTOR. The default value is NONE.
stabilizationMagnitude
A Float specifying the damping intensity of the automatic damping algorithm if the
problem is expected to be unstable, and *stabilizationMethod* is not NONE. The default
value is 2×10–4.
creep
A Boolean specifying whether a creep response occurs during this step. The default value
is ON.
timeIncrementationMethod
A SymbolicConstant specifying the time incrementation method to be used. Possible values
are FIXED and AUTOMATIC. The default value is AUTOMATIC.
initialInc
A Float specifying the initial time increment. The default value is the total time
period for the step.
minInc
A Float specifying the minimum time increment allowed. The default value is the smaller
of the suggested initial time increment or 10−5 times the total time period.
maxInc
A Float specifying the maximum time increment allowed. The default value is the total
time period for the step.
maxNumInc
An Int specifying the maximum number of increments in a step. The default value is 100.
end
A SymbolicConstant specifying the time period to be analyzed in a transient analysis.
Possible values are PERIOD and SS. The default value is PERIOD.
utol
None or a Float specifying the maximum pore pressure change permitted in any increment
(in pressure units) in a transient consolidation analysis. The default value is None.
cetol
A Float specifying the maximum allowable difference in the creep strain increment
calculated from the creep strain rates at the beginning and end of the increment. The
default value is 0.0.
amplitude
A SymbolicConstant specifying the amplitude variation for loading magnitudes during the
step. The default value is STEP. Possible values are STEP and RAMP.
extrapolation
A SymbolicConstant specifying the type of extrapolation to use in determining the
incremental solution for a nonlinear analysis. Possible values are NONE, LINEAR, and
PARABOLIC. The default value is LINEAR.
matrixSolver
A SymbolicConstant specifying the type of solver. Possible values are DIRECT and
ITERATIVE. The default value is DIRECT.
matrixStorage
A SymbolicConstant specifying the type of matrix storage. Possible values are SYMMETRIC,
UNSYMMETRIC, and SOLVER_DEFAULT. The default value is SOLVER_DEFAULT.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
solutionTechnique
A SymbolicConstant specifying the technique used to for solving nonlinear equations.
Possible values are FULL_NEWTON and QUASI_NEWTON. The default value is FULL_NEWTON.
reformKernel
An Int specifying the number of quasi-Newton iterations allowed before the kernel matrix
is reformed.. The default value is 8.
convertSDI
A SymbolicConstant specifying whether to force a new iteration if severe discontinuities
occur during an iteration. Possible values are PROPAGATED, CONVERT_SDI_OFF, and
CONVERT_SDI_ON. The default value is PROPAGATED.
adaptiveDampingRatio
A Float specifying the maximum allowable ratio of the stabilization energy to the total
strain energy and can be used only if *stabilizationMethod* is not NONE. The default
value is 0.05.
continueDampingFactors
A Boolean specifying whether this step will carry over the damping factors from the
results of the preceding general step. This parameter must be used in conjunction with
the *adaptiveDampingRatio* parameter. The default value is OFF.
Returns
-------
step: SoilsStep
A SoilsStep object.
"""
| |
dist_yy)
return dist_xx, dist_xy, dist_yy
elif mode == 'xy':
if scale is None:
dx = tf.reduce_sum(tf.multiply(x, x), axis=1)
dy = tf.reduce_sum(tf.multiply(y, y), axis=1)
xyt = tf.matmul(x, y, transpose_b=True)
else:
dx = tf.reduce_sum(tf.multiply(x * scale, x), axis=1)
dy = tf.reduce_sum(tf.multiply(y * scale, y), axis=1)
xyt = tf.matmul(x * scale, y, transpose_b=True)
dist_xy = tf.maximum(tf.expand_dims(dx, axis=1) - 2.0 * xyt + tf.expand_dims(dy, axis=0), 0.0)
if do_summary:
with tf.name_scope(None): # return to root scope to avoid scope overlap
tf.summary.histogram(scope_prefix + name + '/dxy', dist_xy)
return dist_xy
else:
raise AttributeError('Mode {} not supported'.format(mode))
def get_squared_dist_ref(x, y):
    """Exact pairwise squared distances via explicit broadcasting.

    Builds the full element-wise difference tensors instead of using the
    Gram-matrix identity, so it is numerically more reliable than get_dist
    at the cost of higher memory and complexity.

    :param x: m-by-d matrix
    :param y: n-by-d matrix, or None to compute only the x-x distances
    :return: dist_xx if y is None, otherwise (dist_xx, dist_xy, dist_yy)
    """
    with tf.name_scope('squared_dist_ref'):
        if len(x.get_shape().as_list()) > 2:
            raise AttributeError('get_dist: Input must be a matrix.')
        col_x = tf.expand_dims(x, axis=2)            # m-by-d-by-1
        row_x = tf.transpose(col_x, perm=(2, 1, 0))  # 1-by-d-by-m
        diff_xx = col_x - row_x                      # m-by-d-by-m differences
        dist_xx = tf.reduce_sum(tf.multiply(diff_xx, diff_xx), axis=1)  # m-by-m
        if y is None:
            return dist_xx
        col_y = tf.expand_dims(y, axis=2)            # n-by-d-by-1
        row_y = tf.transpose(col_y, perm=(2, 1, 0))  # 1-by-d-by-n
        diff_xy = col_x - row_y                      # m-by-d-by-n differences
        dist_xy = tf.reduce_sum(tf.multiply(diff_xy, diff_xy), axis=1)  # m-by-n
        diff_yy = col_y - row_y                      # n-by-d-by-n differences
        dist_yy = tf.reduce_sum(tf.multiply(diff_yy, diff_yy), axis=1)  # n-by-n
        return dist_xx, dist_xy, dist_yy
########################################################################
def squared_dist_triplet(x, y, z, name='squared_dist', do_summary=False, scope_prefix=''):
    """Pairwise squared distance matrices for every pair drawn from {x, y, z}.

    Uses the Gram-matrix identity |a-b|^2 = |a|^2 - 2<a,b> + |b|^2, clipped at
    zero to guard against negative values from floating-point round-off.

    :param x:
    :param y:
    :param z:
    :param name: tf name scope
    :param do_summary: if True, log histograms of all six distance matrices
    :param scope_prefix: prefix for the summary tags
    :return: (d_x_x, d_y_y, d_z_z, d_x_y, d_x_z, d_y_z)
    """
    with tf.name_scope(name):
        gram_xx = tf.matmul(x, x, transpose_b=True)
        gram_yy = tf.matmul(y, y, transpose_b=True)
        gram_zz = tf.matmul(z, z, transpose_b=True)
        gram_xy = tf.matmul(x, y, transpose_b=True)
        gram_yz = tf.matmul(y, z, transpose_b=True)
        gram_xz = tf.matmul(x, z, transpose_b=True)
        sq_x = tf.diag_part(gram_xx)
        sq_y = tf.diag_part(gram_yy)
        sq_z = tf.diag_part(gram_zz)

        def _sq_dist(row_norms, col_norms, inner):
            # |a-b|^2 = |a|^2 - 2<a,b> + |b|^2, floored at 0
            return tf.maximum(
                tf.expand_dims(row_norms, axis=1) - 2.0 * inner + tf.expand_dims(col_norms, axis=0), 0.0)

        d_x_x = _sq_dist(sq_x, sq_x, gram_xx)
        d_y_y = _sq_dist(sq_y, sq_y, gram_yy)
        d_z_z = _sq_dist(sq_z, sq_z, gram_zz)
        d_x_y = _sq_dist(sq_x, sq_y, gram_xy)
        d_y_z = _sq_dist(sq_y, sq_z, gram_yz)
        d_x_z = _sq_dist(sq_x, sq_z, gram_xz)
        if do_summary:
            with tf.name_scope(None):  # return to root scope to avoid scope overlap
                tf.summary.histogram(scope_prefix + name + '/dxx', d_x_x)
                tf.summary.histogram(scope_prefix + name + '/dyy', d_y_y)
                tf.summary.histogram(scope_prefix + name + '/dzz', d_z_z)
                tf.summary.histogram(scope_prefix + name + '/dxy', d_x_y)
                tf.summary.histogram(scope_prefix + name + '/dyz', d_y_z)
                tf.summary.histogram(scope_prefix + name + '/dxz', d_x_z)
        # NOTE: return order interleaves x_z before y_z — kept for caller compatibility
        return d_x_x, d_y_y, d_z_z, d_x_y, d_x_z, d_y_z
########################################################################
def get_dist_np(x, y):
    """Pairwise Euclidean distances between the rows of x and the rows of y.

    :param x: m-by-d array
    :param y: n-by-d array
    :return: m-by-n float32 array; entry (i, j) is the (non-squared) distance
        between x[i] and y[j]
    """
    x = np.array(x, dtype=np.float32)
    y = np.array(y, dtype=np.float32)
    # Broadcast to an m-by-n-by-d difference tensor, then reduce over features.
    diff = x[:, np.newaxis, :] - y[np.newaxis, :, :]
    squared = np.sum(np.multiply(diff, diff), axis=2, dtype=np.float32)
    return np.sqrt(squared)
#########################################################################
def get_batch_squared_dist(x_batch, y_batch=None, axis=1, mode='xx', name='squared_dist'):
    """ This function calculates squared pairwise distance for vectors under xi or between xi and yi
    where i refers to the samples in the batch

    :param x_batch: batch_size-a-b tensor
    :param y_batch: batch_size-c-d tensor; required for every mode except 'xx'
    :param axis: the axis to be considered as features; if axis==1, a=c; if axis==2, b=d
    :param mode: 'xx' (x-x only), 'xy' (x-y only), 'xxxy' (x-x and x-y) or
        'xxxyyy' (x-x, x-y and y-y)
    :param name: tf name scope
    :return: dist tensor(s), one per requested pair, in the order listed above
    """
    # check inputs
    assert axis in [1, 2], 'axis has to be 1 or 2.'
    batch, a, b = x_batch.get_shape().as_list()
    if y_batch is not None:
        batch_y, c, d = y_batch.get_shape().as_list()
        assert batch == batch_y, 'Batch sizes do not match.'
        if axis == 1:
            assert a == c, 'Feature sizes do not match.'
        elif axis == 2:
            assert b == d, 'Feature sizes do not match.'
    else:
        # BUG FIX: an earlier revision remapped mode 'xx' to 'xy' before the dispatch
        # below, which sent the y_batch=None case into the 'xy' branch and crashed on
        # tf.transpose(y_batch, ...). The 'xx' branch handles that case correctly, so
        # the remap is removed and the y_batch requirement is made explicit instead.
        assert mode == 'xx', 'y_batch is required for mode {}'.format(mode)
    with tf.name_scope(name):
        if mode in {'xx', 'xxxyyy', 'xxxy'}:
            # xxt is batch-a-a if axis is 2 else batch-b-b
            xxt = tf.matmul(x_batch, tf.transpose(x_batch, [0, 2, 1])) \
                if axis == 2 else tf.matmul(tf.transpose(x_batch, [0, 2, 1]), x_batch)
            # dx is batch-a if axis is 2 else batch-b
            dx = tf.matrix_diag_part(xxt)
            dist_xx = tf.maximum(tf.expand_dims(dx, axis=2) - 2.0 * xxt + tf.expand_dims(dx, axis=1), 0.0)
            if mode == 'xx':
                return dist_xx
            elif mode == 'xxxy':
                # xyt is batch-a-c if axis is 2 else batch-b-d
                xyt = tf.matmul(x_batch, tf.transpose(y_batch, [0, 2, 1])) \
                    if axis == 2 else tf.matmul(tf.transpose(x_batch, [0, 2, 1]), y_batch)
                # dy is batch-c if axis is 2 else batch-d
                dy = tf.reduce_sum(tf.multiply(y_batch, y_batch), axis=axis)
                dist_xy = tf.maximum(tf.expand_dims(dx, axis=2) - 2.0 * xyt + tf.expand_dims(dy, axis=1), 0.0)
                return dist_xx, dist_xy
            elif mode == 'xxxyyy':
                # xyt is batch-a-c if axis is 2 else batch-b-d
                xyt = tf.matmul(x_batch, tf.transpose(y_batch, [0, 2, 1])) \
                    if axis == 2 else tf.matmul(tf.transpose(x_batch, [0, 2, 1]), y_batch)
                # yyt is batch-c-c if axis is 2 else batch-d-d
                yyt = tf.matmul(y_batch, tf.transpose(y_batch, [0, 2, 1])) \
                    if axis == 2 else tf.matmul(tf.transpose(y_batch, [0, 2, 1]), y_batch)
                # dy is batch-c if axis is 2 else batch-d
                dy = tf.reduce_sum(tf.multiply(y_batch, y_batch), axis=axis)
                dist_xy = tf.maximum(tf.expand_dims(dx, axis=2) - 2.0 * xyt + tf.expand_dims(dy, axis=1), 0.0)
                dist_yy = tf.maximum(tf.expand_dims(dy, axis=2) - 2.0 * yyt + tf.expand_dims(dy, axis=1), 0.0)
                return dist_xx, dist_xy, dist_yy
        elif mode == 'xy':
            # dx is batch-a if axis is 2 else batch-b
            dx = tf.reduce_sum(tf.multiply(x_batch, x_batch), axis=axis)
            # dy is batch-c if axis is 2 else batch-d
            dy = tf.reduce_sum(tf.multiply(y_batch, y_batch), axis=axis)
            # xyt is batch-a-c if axis is 2 else batch-b-d
            xyt = tf.matmul(x_batch, tf.transpose(y_batch, [0, 2, 1])) \
                if axis == 2 else tf.matmul(tf.transpose(x_batch, [0, 2, 1]), y_batch)
            dist_xy = tf.maximum(tf.expand_dims(dx, axis=2) - 2.0 * xyt + tf.expand_dims(dy, axis=1), 0.0)
            return dist_xy
        else:
            raise AttributeError('Mode {} not supported'.format(mode))
#######################################################################
def newton_root(x, f, df, step=None):
    """Perform a single Newton iteration towards a root of f(x) = 0.

    Primarily used as the body of tf.while_loop.

    :param x: current estimate
    :param f: callable returning (f(x), info) where info feeds the gradient computation
    :param df: callable mapping that info to the derivative of f at x
    :param step: optional iteration counter; when given, the incremented counter
        is returned alongside the new estimate
    :return: updated x, or (updated x, step + 1) when step is not None
    """
    value, grad_info = f(x)
    slope = df(grad_info)
    # FLAGS.EPSI regularizes the division when the derivative is near zero
    x_next = x - value / (slope + FLAGS.EPSI)
    return x_next if step is None else (x_next, step + 1)
#######################################################################
def matrix_mean_wo_diagonal(matrix, num_row, num_col=None, name='mu_wo_diag'):
    """Mean of the matrix elements outside the main diagonal.

    2018.4.9 - replace tf.diag_part with tf.matrix_diag_part
    tf.matrix_diag_part can be used for rectangle matrix while tf.diag_part can only be used for square matrix

    :param matrix: num_row-by-num_col tensor (square when num_col is None)
    :param num_row: number of rows
    :type num_row: float
    :param num_col: number of columns; None means a square matrix
    :type num_col: float
    :param name: tf name scope
    :return: scalar mean over all off-diagonal entries
    """
    with tf.name_scope(name):
        off_diag_total = tf.reduce_sum(matrix) - tf.reduce_sum(tf.matrix_diag_part(matrix))
        if num_col is None:
            # square matrix: n*(n-1) off-diagonal entries
            count = num_row * (num_row - 1.0)
        else:
            # rectangular matrix: the diagonal has min(num_row, num_col) entries
            count = num_row * num_col - tf.minimum(num_col, num_row)
        return off_diag_total / count
########################################################################
def row_mean_wo_diagonal(matrix, num_col, name='mu_wo_diag'):
    """Row-wise mean of the matrix elements, excluding the diagonal entry of each row.

    :param matrix: square-ish tensor whose rows are averaged
    :param num_col: number of columns
    :type num_col: float
    :param name: tf name scope
    :return: vector of per-row means over the num_col - 1 off-diagonal entries
    """
    with tf.name_scope(name):
        row_totals = tf.reduce_sum(matrix, axis=1)
        diagonal = tf.matrix_diag_part(matrix)
        return (row_totals - diagonal) / (num_col - 1.0)
#########################################################################
def mmd_t(
dist_xx, dist_xy, dist_yy, batch_size, alpha=1.0, beta=2.0, var_target=None, name='mmd',
do_summary=False, scope_prefix=''):
"""This function calculates the maximum mean discrepancy with t-distribution kernel
The code is inspired by the Github page of following paper:
<NAME>., <NAME>., <NAME>., <NAME>. (2018)
Demystifying MMD | |
# from sklearn.cluster._kmeans import *
import copy
from typing import Union
import torch
import torch.nn as nn
from sklearn.cluster._robustq import *
from .quantizer import Quantizer
__all__ = ['MiniBatchRobustqTorch', 'RobustqTorch']
class ClusterQuantizerBase(Quantizer):
    """Base class for codebook (cluster) based quantizers.

    The fitted assignment (``labels_``) and the codebook (``cluster_centers_``)
    are registered as buffers so they travel with the module's state dict.
    """

    def __init__(self, n_feature=1, n_clusters=8, name='',
                 quant_fun=lambda x: x):
        """
        Args:
            n_feature: number of features per cluster center (codebook columns).
            n_clusters: number of quantization levels.
            name: display name reported by extra_repr().
            quant_fun: callable applied by forward(); identity by default.
        """
        super(ClusterQuantizerBase, self).__init__()
        self.n_clusters = n_clusters
        self.name = name
        # Empty labels_ marks "not fitted yet" so loading code can detect whether
        # a checkpoint carries fitted labels.
        self.register_buffer("labels_", torch.zeros((0, ), dtype=torch.long))
        # All-zero centers mark "not initialized yet".
        self.register_buffer("cluster_centers_", torch.zeros(n_clusters, n_feature))
        self.quant_fun = quant_fun

    def reset(self):
        """Return to the un-fitted state with uniformly spaced centers in [-1, 1]."""
        super().reset()
        # Re-register instead of zeroing in place so the buffer may change size.
        self.register_buffer("labels_", torch.zeros((0, ), dtype=torch.long))
        # NOTE(review): writes a (n_clusters, 1) tensor, which assumes
        # n_feature == 1 — confirm for multi-feature codebooks.
        self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=self.n_clusters).view(-1, 1))

    def forward(self, inputs):
        # BUG FIX: __init__ stores the callable as `quant_fun`; the previous code
        # referenced the non-existent `self.quant_func` and raised AttributeError.
        output = self.quant_fun(inputs)
        return output

    def extra_repr(self) -> str:
        return 'name={},cluster={}'.format(self.name, self.n_clusters)

    @staticmethod
    def quant_calib(net, wrapped_modules, calib_loader):
        """Run the multi-step calibration pass over all wrapped modules.

        Puts every module into calibration mode, feeds the calibration loader
        through the network once per calibration step, then switches the
        modules to QAT forward mode.
        """
        calib_layers = []
        n_calibration_steps = 1
        for name, module in wrapped_modules.items():
            module.mode = 'calibration_forward'
            calib_layers.append(name)
            # Modules may require different numbers of calibration passes; run the maximum.
            n_calibration_steps = max(n_calibration_steps, module.quantizer.n_calibration_steps)
        print(f"prepare calibration for {calib_layers}\n n_calibration_steps={n_calibration_steps}")
        for step in range(n_calibration_steps):
            print(f"Start calibration step={step+1}")
            for name, module in wrapped_modules.items():
                module.quantizer.calibration_step = step + 1
            with torch.no_grad():
                for inp, target in calib_loader:
                    inp = inp.cuda()
                    net(inp)
        for name, module in wrapped_modules.items():
            print(f"{name}: {module.quantizer}")
            module.mode = 'qat_forward'
        print("calibration finished")
class RobustqTorch(ClusterQuantizerBase):
    """Torch front-end for the sklearn-style RobustQ clustering quantizer.

    Fitting and prediction are delegated to ``self.kmeans`` (a ``RobustQ``
    instance operating on numpy data); the fitted codebook and labels are
    mirrored into the buffers inherited from ClusterQuantizerBase.
    """

    def __init__(self, # data_or_size,
                 n_feature=1, n_clusters=8, name='',
                 alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
        # alpha/gamma are forwarded to RobustQ as var_std/var_weight on every
        # fit/predict call; remaining kwargs go straight to the RobustQ ctor.
        super(RobustqTorch, self).__init__(n_feature, n_clusters=n_clusters, name=name)
        self.alpha = alpha
        self.gamma = gamma
        self.kmeans = RobustQ(n_clusters=n_clusters, **kwargs)
        # if hasattr(data_or_size, '__array__'):
        #     data = data_or_size
        # else:
        #     data = None
        # # if isinstance(data, torch.Tensor):
        # #     data = data.detach().clone().cpu().view(-1, 1).numpy()
        # if isinstance(data, np.ndarray):
        #     data = self.label_.new_tensor(torch.from_numpy(data))
        # self.init_layer_cluster_center(data, n_clusters, q_level_init)
        self.init_layer_cluster_center(None, n_clusters, q_level_init)

    def init_layer_cluster_center(self, data, n_clusters, method="uniform"):
        """Initialize the codebook: uniformly over [-1, 1], or by fitting on data."""
        if method == "uniform" or data is None:
            self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=n_clusters).view(-1, 1))
            # Keep the sklearn-side codebook in sync with the torch buffer.
            self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()
        else:
            self.fit(data, tol=1e-2)

    def reset(self):
        super().reset()
        # Mirror the freshly reset torch codebook back into the sklearn estimator.
        self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()

    def fit(self, X: torch.Tensor, y=None, sample_weight=None, n_init=None, init=None, tol=None):
        """Fit the codebook on the flattened values of X.

        n_init/init/tol, when given, temporarily override the corresponding
        RobustQ settings for this call only; the previous values are restored
        afterwards. The fitted labels and centers are copied into the module
        buffers.
        """
        # 210626 data copy optimization
        # data = X.detach().clone().view(-1, 1)
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        # Back up the estimator settings so per-call overrides do not persist.
        bak = copy.deepcopy([self.kmeans.n_init, self.kmeans.init, self.kmeans.tol])
        self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = [new if new is not None else old
                                                                 for new, old in zip((n_init, init, tol), bak)]
        self.kmeans.fit(data, y=y, sample_weight=sample_weight, var_std=self.alpha, var_weight=self.gamma)
        # self.labels_.data.copy_(torch.from_numpy(self.kmeans.labels_))
        # Re-register because the label count changes with the input size.
        self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_, dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))
        self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = bak

    def predict(self, X, sample_weight=None):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.

        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        # 210626 data copy optimization
        # data = X.detach().clone().view(-1, 1)
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        return self.kmeans.predict(data, sample_weight, var_std=self.alpha, var_weight=self.gamma)

    def forward(self, inputs):
        """Quantize inputs to codebook values; differentiable during training.

        Modes (flags presumably set by the Quantizer base / wrapper —
        NOTE(review): confirm where `calibration`/`calibrated` come from):
        calibration -> (re-)fit the codebook, training -> straight-through
        estimator, eval -> reuse stored labels.
        """
        # To avoid fault fitness in initial iterations
        # if (self.cluster_centers_.data == 0).all():
        #     # use uniform quantization to avoid further fitness with bad data
        #     self.init_layer_cluster_center(inputs, self.weight_qbit)
        if self.calibration and not self.calibrated:
            self.fit(inputs)
            labels = self.labels_
            weight_quan = self.cluster_centers_[:, 0][labels].view(inputs.shape)
        elif self.training:
            # label should change as weights are updated
            labels = self.predict(inputs)
            weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
            # Straight-through estimator: forward pass uses quantized values,
            # backward pass passes gradients through `inputs` unchanged.
            weight_quan = inputs - inputs.detach() + weight_quan_temp
        else:
            # to avoid load the model without pre-fitness
            # if len(self.labels_.data) == 0:
            #     # self.labels_.data.copy_(torch.from_numpy(self.predict(inputs)).view(-1))
            #     self.register_buffer("labels_", torch.from_numpy(self.predict(inputs)).view(-1))
            assert len(self.labels_.data)
            labels = self.labels_
            weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
            weight_quan = weight_quan_temp
        return weight_quan

    def extra_repr(self) -> str:
        # NOTE(review): the trailing " )" has no opening parenthesis — looks like
        # leftover formatting; kept as-is since it only affects repr output.
        return super(RobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
class MiniBatchRobustqTorch(RobustqTorch):
    """Mini-batch variant of RobustqTorch backed by a MiniBatchRobustQ estimator."""

    def __init__(self, # batch_size, # data_or_size,
                 n_feature=1, n_clusters=8, name='',
                 alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
        # NOTE(review): "batch_size" is popped so the parent ctor (which builds a
        # plain RobustQ) does not receive it, but it is consequently also absent
        # when MiniBatchRobustQ is constructed below, even though callers pass it
        # (see insert_robust_quntizer) — verify dropping it is intended.
        if "batch_size" in kwargs:
            kwargs.pop("batch_size")
        super().__init__(n_feature=n_feature, n_clusters=n_clusters, name=name,
                         alpha=alpha, gamma=gamma, q_level_init=q_level_init, **kwargs)
        # Replace the parent's RobustQ backend with the mini-batch implementation.
        self.kmeans = MiniBatchRobustQ(n_clusters=n_clusters, **kwargs)
        # if hasattr(data_or_size, '__array__'):
        #     data = data_or_size
        # else:
        #     data = None
        # # if isinstance(data, torch.Tensor):
        # #     data = data.detach().clone().cpu().view(-1, 1).numpy()
        # if isinstance(data, np.ndarray):
        #     data = self.label_.new_tensor(torch.from_numpy(data))
        # self.init_layer_cluster_center(data, n_clusters, q_level_init)
        self.init_layer_cluster_center(None, n_clusters, q_level_init)

    def partial_fit(self, X, y=None, sample_weight=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Coordinates of the data points to cluster. It must be noted that
            X will be copied if it is not C-contiguous.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).

        Returns
        -------
        self
        """
        # 210626 data copy optimization
        # data = X.detach().clone().view(-1, 1)
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        self.kmeans.partial_fit(data, y, sample_weight, var_std=self.alpha, var_weight=self.gamma)
        # self.labels_.data.copy_(torch.from_numpy(self.kmeans.labels_))
        # Re-register because the label count changes with the mini-batch size.
        self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_, dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))

    def extra_repr(self) -> str:
        return super(MiniBatchRobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
# TODO: Use close package
def insert_robust_quntizer(module:nn.Module, quantizer: Union[RobustqTorch, MiniBatchRobustqTorch], alpha, gamma):
    """Attach a robust weight quantizer to every Conv2d/Linear layer of *module*.

    The type of *quantizer* selects which implementation is installed on each
    layer's ``m.quantizer.w_quantizer``; the number of clusters is derived
    from the layer's configured weight bit-width.

    Parameters
    ----------
    module : nn.Module
        Network whose Conv2d/Linear sub-modules are to be equipped.
    quantizer : RobustqTorch or MiniBatchRobustqTorch
        Instance whose type selects the quantizer flavour to install.
    alpha, gamma :
        Robustness hyper-parameters forwarded to the new quantizers.
    """
    for k, m in module.named_modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            n_samples = m.weight.numel()
            # BUGFIX: was ``m.quanizer.w_bit`` (typo) — every other access in
            # this function goes through ``m.quantizer``.
            n_clusters = 2 ** m.quantizer.w_bit - 1
            batch_factor = 800
            # if q_type == 'robust_batch':
            if isinstance(quantizer, MiniBatchRobustqTorch):
                # Cap the mini-batch size at 20-30% of the weight count.
                m.quantizer.w_quantizer = MiniBatchRobustqTorch(n_feature=1,
                                                                n_clusters=n_clusters,
                                                                alpha=alpha, gamma=gamma,
                                                                batch_size=n_clusters * batch_factor
                                                                if n_clusters * batch_factor < int(0.3 * n_samples)
                                                                else int(0.2 * n_samples),
                                                                n_init=1, max_iter=30, random_state=0,
                                                                q_level_init="uniform"
                                                                )
            # elif q_type == 'robust':
            elif isinstance(quantizer, RobustqTorch):
                m.quantizer.w_quantizer = RobustqTorch(n_feature=1,
                                                       n_clusters=n_clusters,
                                                       alpha=alpha, gamma=gamma,
                                                       n_init=1, max_iter=30, random_state=0,
                                                       q_level_init="uniform"
                                                       )
if __name__ == '__main__':
    # Ad-hoc benchmark driver: build sklearn KMeans/MiniBatchKMeans and
    # RobustQ/RobustqTorch quantizers (one of each flavour per large weight
    # tensor of a pretrained ResNet-18 checkpoint) for timing comparisons below.
    import numpy as np
    np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
    torch.set_printoptions(3)
    import sklearn
    sklearn.show_versions()
    a = {}
    # vgg = models.vgg11(pretrained=True)
    # if torch.cuda.is_available():
    #     vgg.cuda()
    # a['state_dict'] = vgg.state_dict()
    a = torch.load("plot/checkpoints/resnet18_batch256_imagenet_20200708-34ab8f90.pth",
                   map_location=torch.device('cpu') if not torch.cuda.is_available() else torch.device('cuda'))
    num_class = 7        # number of quantization levels (clusters)
    batch_factor = 800   # scales mini-batch size relative to num_class
    gamma = 0.
    train_flg = False
    robustq_torch_batch = []
    robustq_sklean_batch = []
    robustq_torch = []
    robustq_sklean = []
    kmeans_sklean = []
    kmeans_sklean_batch = []
    for n, v in a['state_dict'].items():
        if "weight" in n:
            n_samples = v.numel()
            # Only benchmark sufficiently large tensors.
            if n_samples > 1024:
                print(n_samples)
                # from sklearn
                kmeans_sklean.append(
                    KMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
                # NOTE(review): this one compares ``num_class * 300`` while all
                # the others compare ``num_class * batch_factor`` — confirm
                # intentional.
                kmeans_sklean_batch.append(
                    MiniBatchKMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0,  # tol=1e-4,
                                    batch_size=num_class * batch_factor if num_class * 300 < int(
                                        0.3 * n_samples) else int(0.2 * n_samples)))
                # from Robustq
                robustq_sklean.append(
                    RobustQ(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
                robustq_sklean_batch.append(MiniBatchRobustQ(n_clusters=num_class,
                                                             n_init=1, max_iter=30, random_state=0,  # tol=1e-4,
                                                             batch_size=num_class * batch_factor
                                                             if num_class * batch_factor < int(0.3 * n_samples)
                                                             else int(0.2 * n_samples)))
                # from clusterq
                robustq_torch_batch_t = MiniBatchRobustqTorch(n_feature=1,
                                                              n_clusters=num_class,
                                                              alpha=0.12, gamma=gamma,
                                                              batch_size=num_class * batch_factor
                                                              if num_class * batch_factor < int(0.3 * n_samples)
                                                              else int(0.2 * n_samples),
                                                              n_init=1, max_iter=30, random_state=0,
                                                              q_level_init="uniform"
                                                              )
                if not train_flg:
                    robustq_torch_batch_t.eval()
                robustq_torch_t = RobustqTorch(n_feature=1,
                                               n_clusters=num_class,
                                               alpha=0.12, gamma=gamma,
                                               n_init=1, max_iter=30, random_state=0,
                                               q_level_init="uniform"
                                               )
                if not train_flg:
                    robustq_torch_t.eval()
                if torch.cuda.is_available():
                    robustq_torch_batch_t.cuda()
                    robustq_torch_t.cuda()
                robustq_torch.append(robustq_torch_t)
                robustq_torch_batch.append(robustq_torch_batch_t)
    import sys
    sys.path.append("../")
    from utee.misc import time_measurement
    @time_measurement(False, 0, 0)
    def f1(quantizer_list, is_np=False):
        """Fit each quantizer in *quantizer_list* on successive large weight
        tensors of the loaded state dict, then verify the input data was not
        mutated in place by ``fit``.
        """
        print("start\n")
        ix = 0
        for n, v in a['state_dict'].items():
            if "weight" in n:
                n_samples = v.numel()
                if n_samples > 1024:
                    data_o = v.detach().view(-1, 1)
                    if is_np:
                        data = data_o.cpu().numpy()
                    else:
                        # NOTE(review): this branch assumes CUDA is available.
                        data = data_o.cuda()
                    quantizer_list[ix].fit(data)
                    # Re-extract the original weights and compare against what
                    # was passed to fit() to detect in-place modification.
                    data_o = v.detach().view(-1, 1)
                    if is_np:
                        datac = data_o.cpu().numpy()
                        t = (datac != data)
                        tt = t if not isinstance(t, np.ndarray) else t.any()
                        # print("data is modified:", tt)
                    else:
                        datac = data_o.cuda()
                        t = (datac != data)
                        tt = t.any().item()
                        # print("data is modified:", tt)
                    if tt:
                        print("max difference:", ((datac - data_o)[t]).max())
                    ix += 1
# import visdom
#
# vis = visdom.Visdom()
    # No-op stand-in for visdom.Visdom: both plotting hooks silently discard
    # their arguments so the script can run without a visdom server (the real
    # client import is commented out above).
    class Visdom():
        def bar(self, *args, **kwargs):
            pass
        def line(self, *args, **kwargs):
            pass
    vis = Visdom()
    def plot(quantizer, name="None", is_np=False):
        """Visualise a fitted quantizer: a histogram of the quantized weight
        distribution and the quantization transfer function on [-1, 1).

        is_np selects the numpy code paths (for sklearn-style quantizers)
        versus torch tensor paths.
        """
        print(quantizer.labels_)
        print(quantizer.cluster_centers_)
        # ------------- visdom draw --------------
        # histogram of weight distribution
        # Map every sample's label to its cluster-centre value.
        qw = quantizer.cluster_centers_[:, 0][quantizer.labels_]  # .view(weight.shape)
        qw_hist = []
        if is_np:
            qw_v = np.unique(qw)
            for v in qw_v:
                qw_hist.append((qw == v).sum())
        else:
            qw_v = qw.unique()
            for v in qw_v:
                qw_hist.append((qw == v).sum().item())
        vis.bar(torch.tensor(qw_hist), qw_v, win=name + " hist",
                opts=dict(title=name + " hist" + ' gamma={}'.format(gamma)))
        # vis.histogram(qw, win=name+" hist",
        #               opts=dict(title=name+" hist"+' gamma={}'.format(gamma)))
        # transform function
        x = torch.arange(-1., 1., 0.01)
        print(x.shape)
        if is_np:
            x = x.view(-1, 1).cpu().numpy()
        elif torch.cuda.is_available():
            x = x.view(-1, 1).cuda()
        else:
            x = x.view(-1, 1)
        # Quantization level assigned to each probe point.
        level1 = quantizer.cluster_centers_[:, 0][quantizer.predict(x)]
        # print(level1.shape, x.shape)
        vis.line(Y=level1, X=x.reshape(-1),
                 win=name,
                 opts=dict(title=name))
@time_measurement(False, 0, 0)
def get_q_loss(quantizer_list, is_np=False):
ix = 0
loss = 0
for n, v in a['state_dict'].items():
if | |
'_mpf_'):
return False
return x._mpf_ in (finf, fninf)
def isint(ctx, x):
"""
For an ``mpf`` *x*, or any type that can be converted
to ``mpf``, determines whether *x* is exactly
integer-valued::
>>> from mpmath import *
>>> isint(3), isint(mpf(3)), isint(3.2)
(True, True, False)
"""
if isinstance(x, int_types):
return True
try:
x = ctx.convert(x)
except:
return False
if hasattr(x, '_mpf_'):
if ctx.isnan(x) or ctx.isinf(x):
return False
return x == int(x)
return False
def _mpf_mag(ctx, x):
sign, man, exp, bc = x
if man:
return exp+bc
if x == fzero:
return ctx.ninf
if x == finf or x == fninf:
return ctx.inf
return ctx.nan
def mag(ctx, x):
if type(x) in int_types: # XXX: inttypes
if x:
return bitcount(abs(x))
return ctx.ninf
# Hack
if hasattr(x, "_mpq_"):
p, q = x._mpq_
if p:
return 1 + bitcount(abs(p)) - bitcount(abs(q))
return ctx.ninf
x = ctx.convert(x)
if hasattr(x, "_mpf_"):
return ctx._mpf_mag(x._mpf_)
if hasattr(x, "_mpc_"):
r, i = x._mpc_
if r == fzero:
return ctx._mpf_mag(i)
if i == fzero:
return ctx._mpf_mag(r)
return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i))
raise ValueError("mag() needed a number")
    def nint_distance(ctx, x):
        """
        Returns (n, d) where n is the nearest integer to x and d is the
        log-2 distance (i.e. distance in bits) of n from x. If d < 0,
        (-d) gives the bits of cancellation when n is subtracted from x.
        This function is intended to be used to check for cancellation
        at poles.
        """
        x = ctx.convert(x)
        # Split into a raw real part plus a magnitude for the imaginary part.
        if hasattr(x, "_mpf_"):
            re = x._mpf_
            im_dist = ctx.ninf
        elif hasattr(x, "_mpc_"):
            re, im = x._mpc_
            isign, iman, iexp, ibc = im
            if iman:
                # Nonzero imaginary part: its magnitude is iexp+ibc.
                im_dist = iexp + ibc
            elif im == fzero:
                im_dist = ctx.ninf
            else:
                # Imaginary part is inf/nan.
                raise ValueError("requires a finite number")
        else:
            raise TypeError("requires an mpf/mpc")
        sign, man, exp, bc = re
        # shift = exp+bc is the bit position of the real part's MSB.
        shift = exp+bc
        if sign:
            man = -man
        if shift < -1:
            # |re| < 1/4: nearest integer is 0; magnitude is the distance.
            n = 0
            re_dist = shift
        elif man:
            if exp >= 0:
                # Exactly an integer.
                n = man << exp
                re_dist = ctx.ninf
            else:
                # Fixed-point representation of re with bc fractional bits:
                # xfixed == re * 2**bc.
                if shift >= 0:
                    xfixed = man << shift
                else:
                    xfixed = man >> (-shift)
                # Floor (n1) and ceiling (n2, via negation) candidates.
                n1 = xfixed >> bc
                n2 = -((-xfixed) >> bc)
                dist1 = abs(xfixed - (n1<<bc))
                dist2 = abs(xfixed - (n2<<bc))
                if dist1 < dist2:
                    re_dist = dist1
                    n = n1
                else:
                    re_dist = dist2
                    n = n2
                if re_dist:
                    # Convert the fixed-point distance to a bit count.
                    re_dist = bitcount(re_dist) - bc
                else:
                    re_dist = ctx.ninf
        elif re == fzero:
            re_dist = ctx.ninf
            n = 0
        else:
            # Real part is inf/nan.
            raise ValueError("requires a finite number")
        # Overall distance is dominated by the farther component.
        return n, max(re_dist, im_dist)
def chop(ctx, x, tol=None):
"""
Chops off small real or imaginary parts, or converts
numbers close to zero to exact zeros. The input can be a
single number or an iterable::
>>> from mpmath import *
>>> mp.dps = 15
>>> chop(5+1e-10j, tol=1e-9)
mpf('5.0')
>>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2]))
[1.0, 0.0, 3.0, -4.0, 2.0]
The tolerance defaults to ``100*eps``.
"""
if tol is None:
tol = 100*ctx.eps
try:
x = ctx.convert(x)
absx = abs(x)
if abs(x) < tol:
return ctx.zero
if ctx.is_complex_type(x):
if abs(x.imag) < min(tol, absx*tol):
return x.real
if abs(x.real) < min(tol, absx*tol):
return ctx.mpc(0, x.imag)
except TypeError:
if hasattr(x, "__iter__"):
return [ctx.chop(a, tol) for a in x]
return x
def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
r"""
Determine whether the difference between `s` and `t` is smaller
than a given epsilon, either relatively or absolutely.
Both a maximum relative difference and a maximum difference
('epsilons') may be specified. The absolute difference is
defined as `|s-t|` and the relative difference is defined
as `|s-t|/\max(|s|, |t|)`.
If only one epsilon is given, both are set to the same value.
If none is given, both epsilons are set to `2^{-p+m}` where
`p` is the current working precision and `m` is a small
integer. The default setting typically allows :func:`almosteq`
to be used to check for mathematical equality
in the presence of small rounding errors.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15
>>> almosteq(3.141592653589793, 3.141592653589790)
True
>>> almosteq(3.141592653589793, 3.141592653589700)
False
>>> almosteq(3.141592653589793, 3.141592653589700, 1e-10)
True
>>> almosteq(1e-20, 2e-20)
True
>>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0)
False
"""
t = ctx.convert(t)
if abs_eps is None and rel_eps is None:
rel_eps = abs_eps = ctx.make_mpf((0, 1, -ctx.prec+4, 1))
if abs_eps is None:
abs_eps = rel_eps
elif rel_eps is None:
rel_eps = abs_eps
diff = abs(s-t)
if diff <= abs_eps:
return True
abss = abs(s)
abst = abs(t)
if abss < abst:
err = diff/abst
else:
err = diff/abss
return err <= rel_eps
def fsum(ctx, terms, absolute=False, squared=False):
"""
Calculates a sum containing a finite number of terms (for infinite
series, see :func:`nsum`). The terms will be converted to
mpmath numbers. For len(terms) > 2, this function is generally
faster and produces more accurate results than the builtin
Python function :func:`sum`.
>>> from mpmath import *
>>> mp.dps = 15
>>> fsum([1, 2, 0.5, 7])
mpf('10.5')
With squared=True each term is squared, and with absolute=True
the absolute value of each term is used.
"""
prec, rnd = ctx._prec_rounding
real = []
imag = []
other = 0
for term in terms:
reval = imval = 0
if hasattr(term, "_mpf_"):
reval = term._mpf_
elif hasattr(term, "_mpc_"):
reval, imval = term._mpc_
else:
term = ctx.convert(term)
if hasattr(term, "_mpf_"):
reval = term._mpf_
elif hasattr(term, "_mpc_"):
reval, imval = term._mpc_
else:
if absolute: term = ctx.absmax(term)
if squared: term = term**2
other += term
continue
if imval:
if squared:
if absolute:
real.append(mpf_mul(reval,reval))
real.append(mpf_mul(imval,imval))
else:
reval, imval = mpc_pow_int((reval,imval),2,prec+10)
real.append(reval)
imag.append(imval)
elif absolute:
real.append(mpc_abs((reval,imval), prec))
else:
real.append(reval)
imag.append(imval)
else:
if squared:
reval = mpf_mul(reval, reval)
elif absolute:
reval = mpf_abs(reval)
real.append(reval)
s = mpf_sum(real, prec, rnd, absolute)
if imag:
s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
else:
s = ctx.make_mpf(s)
if other is 0:
return s
else:
return s + other
def fdot(ctx, A, B=None):
r"""
Computes the dot product of the iterables `A` and `B`,
.. math ::
\sum_{k=0} A_k B_k.
Alternatively, :func:`fdot` accepts a single iterable of pairs.
In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent.
The elements are automatically converted to mpmath numbers.
Examples::
>>> from mpmath import *
>>> mp.dps = 15
>>> A = [2, 1.5, 3]
>>> B = [1, -1, 2]
>>> fdot(A, B)
mpf('6.5')
>>> zip(A, B)
[(2, 1), (1.5, -1), (3, 2)]
>>> fdot(_)
mpf('6.5')
"""
if B:
A = zip(A, B)
prec, rnd = ctx._prec_rounding
real = []
imag = []
other = 0
hasattr_ = hasattr
types = (ctx.mpf, ctx.mpc)
for a, b in A:
if type(a) not in types: a = ctx.convert(a)
if type(b) not in types: b = ctx.convert(b)
a_real = hasattr_(a, "_mpf_")
b_real = hasattr_(b, "_mpf_")
if a_real and b_real:
real.append(mpf_mul(a._mpf_, b._mpf_))
continue
a_complex = hasattr_(a, "_mpc_")
b_complex = hasattr_(b, "_mpc_")
if a_real and b_complex:
aval = a._mpf_
bre, bim = b._mpc_
real.append(mpf_mul(aval, bre))
imag.append(mpf_mul(aval, bim))
elif b_real and a_complex:
are, aim = a._mpc_
bval = b._mpf_
real.append(mpf_mul(are, bval))
imag.append(mpf_mul(aim, bval))
elif a_complex and b_complex:
re, im = mpc_mul(a._mpc_, b._mpc_, prec+20)
real.append(re)
imag.append(im)
else:
other += a*b
s = mpf_sum(real, prec, rnd)
if imag:
s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
else:
s = ctx.make_mpf(s)
if other is 0:
return s
else:
return s + other
def fprod(ctx, factors):
r"""
Calculates a product containing a finite number of factors (for
infinite products, see :func:`nprod`). The factors will be
converted to mpmath numbers.
>>> from mpmath import *
>>> mp.dps = 15
>>> fprod([1, 2, 0.5, 7])
mpf('7.0')
"""
orig = ctx.prec
try:
v = ctx.one
for p in factors:
v *= p
finally:
ctx.prec = orig
return +v
    def rand(ctx):
        """
        Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
        The number of randomly generated bits in the mantissa is equal
        to the working precision.
        """
        # Delegate to the libmp-level generator at the current binary precision.
        return ctx.make_mpf(mpf_rand(ctx._prec))
def fraction(ctx, p, q):
"""
Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
the fraction `p/q`. The value is updated with the | |
still does not exist
self.assertFalse(RecordField.exists(self.testcoll, "newfield"))
return
def test_post_new_field_missing_id(self):
f = field_view_form_data(action="new")
u = entitydata_edit_url("new", "testcoll", layout.FIELD_TYPEID, view_id="Field_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_FIELD_ID))
# Test context
expect_context = field_view_context_data(action="new")
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
return
def test_post_new_field_invalid_id(self):
f = field_view_form_data(field_id="!badfield", orig_id="orig_field_id", action="new")
u = entitydata_edit_url("new", "testcoll", layout.FIELD_TYPEID, view_id="Field_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_FIELD_ID))
# Test context
expect_context = field_view_context_data(
field_id="!badfield", orig_id="orig_field_id", action="new"
)
# print "@@ context %r"%(context_bind_fields(r.context)['fields'][9],)
# print "@@ context field value %r"%(context_bind_fields(r.context)['fields'][9]['field_value'],)
# print "@@ expect %r"%(expect_context['fields'][9],)
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
return
# -------- copy field --------
def test_post_copy_entity(self):
self.assertFalse(RecordField.exists(self.testcoll, "copyfield"))
f = field_view_form_data(
field_id="copyfield", action="copy",
property_uri="test:copy_prop",
)
u = entitydata_edit_url("copy", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="Entity_type"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", layout.FIELD_TYPEID))
# Check that new record type exists
self._check_field_data_values("copyfield", property_uri="test:copy_prop")
return
def test_post_copy_entity_cancel(self):
self.assertFalse(RecordField.exists(self.testcoll, "copyfield"))
f = field_view_form_data(
field_id="copyfield", action="copy", cancel="Cancel"
)
u = entitydata_edit_url("copy", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="Entity_type"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", layout.FIELD_TYPEID))
# Check that target record type still does not exist
self.assertFalse(RecordField.exists(self.testcoll, "copyfield"))
return
def test_post_copy_entity_missing_id(self):
f = field_view_form_data(action="copy")
u = entitydata_edit_url("copy", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="Entity_type"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_FIELD_ID))
expect_context = field_view_context_data(action="copy")
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
return
def test_post_copy_entity_invalid_id(self):
f = field_view_form_data(
field_id="!badentity", orig_id="orig_field_id", action="copy"
)
u = entitydata_edit_url("copy", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="Entity_type"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_FIELD_ID))
expect_context = field_view_context_data(
field_id="!badentity", orig_id="orig_field_id", action="copy"
)
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
return
# -------- edit field --------
def test_post_edit_entity(self):
self._create_view_data("editfield")
self._check_field_data_values("editfield")
f = field_view_form_data(
field_id="editfield", action="edit",
property_uri="test:edit_prop",
update="Updated entity"
)
u = entitydata_edit_url("edit", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="editfield"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", layout.FIELD_TYPEID))
self._check_field_data_values("editfield",
property_uri="test:edit_prop",
update="Updated entity"
)
return
def test_post_edit_entity_new_id(self):
self._create_view_data("editfieldid1")
self._check_field_data_values("editfieldid1")
# Now post edit form submission with different values and new id
f = field_view_form_data(
field_id="editfieldid2", orig_id="editfieldid1", action="edit",
property_uri="test:edit_prop"
)
u = entitydata_edit_url("edit", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="editfieldid1"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", layout.FIELD_TYPEID))
# Check that new record type exists and old does not
self.assertFalse(RecordField.exists(self.testcoll, "editfieldid1"))
self._check_field_data_values("editfieldid2", property_uri="test:edit_prop")
return
def test_post_edit_entity_cancel(self):
self._create_view_data("editfield")
self._check_field_data_values("editfield")
# Post from cancelled edit form
f = field_view_form_data(
field_id="editfield", action="edit", cancel="Cancel", update="Updated entity"
)
u = entitydata_edit_url("edit", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="editfield"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", layout.FIELD_TYPEID))
# Check that target record type still does not exist and unchanged
self._check_field_data_values("editfield")
return
def test_post_edit_entity_missing_id(self):
self._create_view_data("editfield")
self._check_field_data_values("editfield")
# Form post with ID missing
f = field_view_form_data(action="edit", update="Updated entity")
u = entitydata_edit_url("edit", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="editfield"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_FIELD_ID))
# Test context for re-rendered form
expect_context = field_view_context_data(action="edit", update="Updated entity")
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
# Check stored entity is unchanged
self._check_field_data_values("editfield")
return
def test_post_edit_entity_invalid_id(self):
self._create_view_data("editfield")
self._check_field_data_values("editfield")
# Form post with ID malformed
f = field_view_form_data(
field_id="!badfieldid", orig_id="orig_field_id", action="edit"
)
u = entitydata_edit_url("edit", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="fieldid"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_FIELD_ID))
# Test context for re-rendered form
expect_context = field_view_context_data(
field_id="!badfieldid", orig_id="orig_field_id", action="edit"
)
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
# Check stored entity is unchanged
self._check_field_data_values("editfield")
return
    # -------- define repeat field and group --------
    def test_define_repeat_field_task(self):
        """'Define repeat field' task: posting the task button creates a companion
        repeat-sequence field wrapping the original field, and checks the stored
        values of both."""
        # @@TODO: In due course, this will be deprecated
        # Create new field entity
        self._create_view_data("taskrepeatfield")
        self._check_field_data_values("taskrepeatfield")
        # Post define repeat field
        f = field_view_form_data(
            field_id="taskrepeatfield",
            field_label="Test repeat field",
            entity_type="test:repeat_field",
            property_uri="test:repeat_prop",
            value_type="annal:Text",
            field_placement="small:0,12",
            task="Define_repeat_field"
            )
        u = entitydata_edit_url("edit", "testcoll",
            type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="taskrepeatfield"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        # Check content of type, view and list
        common_vals = (
            { 'coll_id': "testcoll"
            , 'field_id': "taskrepeatfield"
            , 'field_label': "Test repeat field"
            , 'type_uri': "test:repeat_field"
            , 'property_uri': "test:repeat_prop"
            , 'field_typeid': layout.FIELD_TYPEID
            })
        # Derived ids/URIs: repeat field id/property carry the sequence suffixes
        tgt_field_id = "%(field_id)s"%common_vals
        tgt_field_uri = "%(property_uri)s"%common_vals
        rpt_field_id = tgt_field_id + layout.SUFFIX_SEQUENCE
        rpt_field_uri = "%(property_uri)s"%(common_vals) + layout.SUFFIX_SEQUENCE_P
        expect_field_values = (
            { "annal:id": tgt_field_id
            , "annal:type": "annal:Field"
            , "rdfs:label": "%(field_label)s"%common_vals
            , "annal:field_render_type": "_enum_render_type/Text"
            , "annal:field_value_mode": "_enum_value_mode/Value_direct"
            , "annal:field_value_type": "annal:Text"
            , "annal:field_entity_type": "%(type_uri)s"%common_vals
            , "annal:property_uri": tgt_field_uri
            , "annal:field_placement": "small:0,12"
            })
        expect_repeat_field_values = (
            { "annal:id": rpt_field_id
            , "annal:type": "annal:Field"
            , "rdfs:label": message.LIST_FIELD_LABEL%common_vals
            , "annal:field_render_type": "_enum_render_type/Group_Seq_Row"
            , "annal:field_value_mode": "_enum_value_mode/Value_direct"
            , "annal:field_entity_type": "%(type_uri)s"%common_vals
            , "annal:field_value_type": "annal:Field_list"
            , "annal:property_uri": rpt_field_uri
            , "annal:field_placement": "small:0,12"
            , "annal:placeholder": message.LIST_FIELD_PLACEHOLDER%common_vals
            , "annal:repeat_label_add": "Add %(field_label)s"%common_vals
            , "annal:repeat_label_delete": "Remove %(field_label)s"%common_vals
            , "annal:field_fields":
              [ { "annal:field_id": "%(field_typeid)s/%(field_id)s"%common_vals
                , "annal:property_uri": tgt_field_uri
                , "annal:field_placement": "small:0,12"
                }
              ]
            })
        self.check_entity_values(layout.FIELD_TYPEID, tgt_field_id, expect_field_values)
        # self.check_entity_values(layout.GROUP_TYPEID, rpt_group_id, expect_repeat_group_values)
        self.check_entity_values(layout.FIELD_TYPEID, rpt_field_id, expect_repeat_field_values)
        return
    def test_define_list_field_task(self):
        """'Define list field' task: posting the task button creates a companion
        list field wrapping the original field, and checks the stored values
        of both."""
        # Create new field entity
        self._create_view_data("tasklistfield")
        self._check_field_data_values("tasklistfield")
        # Post define repeat field
        f = field_view_form_data(
            field_id="tasklistfield",
            field_label="Test list field",
            entity_type="test:list_field",
            property_uri="test:list_prop",
            value_type="annal:Text",
            field_placement="small:0,12",
            task="Define_list_field"
            )
        u = entitydata_edit_url("edit", "testcoll",
            type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="tasklistfield"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        # Check content of type, view and list
        common_vals = (
            { 'coll_id': "testcoll"
            , 'field_id': "tasklistfield"
            , 'field_label': "Test list field"
            , 'type_uri': "test:list_field"
            , 'property_uri': "test:list_prop"
            , 'field_typeid': layout.FIELD_TYPEID
            })
        # Derived ids/URIs: list field id/property carry the sequence suffixes
        tgt_field_id = "%(field_id)s"%common_vals
        tgt_field_uri = "%(property_uri)s"%common_vals
        rpt_field_id = tgt_field_id + layout.SUFFIX_SEQUENCE
        rpt_field_uri = "%(property_uri)s"%(common_vals) + layout.SUFFIX_SEQUENCE_P
        expect_field_values = (
            { "annal:id": tgt_field_id
            , "annal:type": "annal:Field"
            , "rdfs:label": "%(field_label)s"%common_vals
            , "annal:field_render_type": "_enum_render_type/Text"
            , "annal:field_value_mode": "_enum_value_mode/Value_direct"
            , "annal:field_value_type": "annal:Text"
            , "annal:field_entity_type": "%(type_uri)s"%common_vals
            , "annal:property_uri": tgt_field_uri
            , "annal:field_placement": "small:0,12"
            })
        expect_list_field_values = (
            { "annal:id": rpt_field_id
            , "annal:type": "annal:Field"
            , "rdfs:label": message.LIST_FIELD_LABEL%common_vals
            , "annal:field_render_type": "_enum_render_type/Group_Seq_Row"
            , "annal:field_value_mode": "_enum_value_mode/Value_direct"
            , "annal:field_entity_type": "%(type_uri)s"%common_vals
            , "annal:field_value_type": "annal:Field_list"
            , "annal:property_uri": rpt_field_uri
            , "annal:field_placement": "small:0,12"
            , "annal:placeholder": message.LIST_FIELD_PLACEHOLDER%common_vals
            , "annal:repeat_label_add": "Add %(field_label)s"%common_vals
            , "annal:repeat_label_delete": "Remove %(field_label)s"%common_vals
            , "annal:field_fields":
              [ { "annal:field_id": "%(field_typeid)s/%(field_id)s"%common_vals
                , "annal:property_uri": tgt_field_uri
                , "annal:field_placement": "small:0,12"
                }
              ]
            })
        self.check_entity_values(layout.FIELD_TYPEID, tgt_field_id, expect_field_values)
        self.check_entity_values(layout.FIELD_TYPEID, rpt_field_id, expect_list_field_values)
        return
    def test_define_many_field_task(self):
        """'Define many field' task: posting the task button creates a companion
        multi-value (set) field wrapping the original field, and checks the
        stored values of both."""
        # Create new field entity
        self._create_view_data("taskmanyfield")
        self._check_field_data_values("taskmanyfield")
        # Post define multi-value field
        f = field_view_form_data(
            field_id="taskmanyfield",
            field_label="Test many field",
            entity_type="test:many_field",
            property_uri="test:many_prop",
            value_type="annal:Text",
            field_placement="small:0,12",
            task="Define_many_field"
            )
        u = entitydata_edit_url("edit", "testcoll",
            type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id="taskmanyfield"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        # Check content of field
        common_vals = (
            { 'coll_id': "testcoll"
            , 'field_id': "taskmanyfield"
            , 'field_label': "Test many field"
            , 'type_uri': "test:many_field"
            , 'property_uri': "test:many_prop"
            , 'field_typeid': layout.FIELD_TYPEID
            })
        # Derived ids/URIs: the repeat field id carries the repeat suffix but
        # reuses the target property URI unchanged
        tgt_field_id = "%(field_id)s"%common_vals
        tgt_field_uri = "%(property_uri)s"%common_vals
        rpt_field_id = tgt_field_id + layout.SUFFIX_REPEAT
        rpt_field_uri = "%(property_uri)s"%(common_vals)
        expect_field_values = (
            { "annal:id": tgt_field_id
            , "annal:type": "annal:Field"
            , "rdfs:label": "%(field_label)s"%common_vals
            , "annal:field_render_type": "_enum_render_type/Text"
            , "annal:field_value_mode": "_enum_value_mode/Value_direct"
            , "annal:field_value_type": "annal:Text"
            , "annal:field_entity_type": "%(type_uri)s"%common_vals
            , "annal:property_uri": tgt_field_uri
            , "annal:field_placement": "small:0,12"
            })
        expect_many_field_values = (
            { "annal:id": rpt_field_id
            , "annal:type": "annal:Field"
            , "rdfs:label": message.MANY_FIELD_LABEL%common_vals
            , "annal:field_render_type": "_enum_render_type/Group_Set_Row"
            , "annal:field_value_mode": "_enum_value_mode/Value_direct"
            , "annal:field_entity_type": "%(type_uri)s"%common_vals
            , "annal:field_value_type": "%(type_uri)s"%common_vals
            , "annal:property_uri": rpt_field_uri
            , "annal:field_placement": "small:0,12"
            , "annal:placeholder": message.MANY_FIELD_PLACEHOLDER%common_vals
            , "annal:repeat_label_add": "Add %(field_label)s"%common_vals
            , "annal:repeat_label_delete": "Remove %(field_label)s"%common_vals
            , "annal:field_fields":
              [ { "annal:field_id": "%(field_typeid)s/%(field_id)s"%common_vals
                , "annal:property_uri": "@id"
                , "annal:field_placement": "small:0,12"
                }
              ]
            })
        self.check_entity_values(layout.FIELD_TYPEID, tgt_field_id, expect_field_values)
        self.check_entity_values(layout.FIELD_TYPEID, rpt_field_id, expect_many_field_values)
        return
def test_define_field_reference_task(self):
common_vals = (
{ 'coll_id': "testcoll"
, 'field_id': "taskfieldreference"
, 'field_ref_id': "taskfieldreference"+layout.SUFFIX_MULTI
, 'field_label': "Test reference field"
, 'type_uri': "test:ref_field"
, 'property_uri': "test:ref_prop"
, 'field_typeid': layout.FIELD_TYPEID
})
# Create new field
self._create_view_data(common_vals["field_id"])
self._check_field_data_values(common_vals["field_id"])
# Post define field reference
f = field_view_form_data(
field_id=common_vals["field_id"],
field_label=common_vals["field_label"],
entity_type=common_vals["type_uri"],
property_uri=common_vals["property_uri"],
value_type="annal:Text",
field_placement="small:0,12",
task="Define_field_ref"
)
u = entitydata_edit_url("edit", "testcoll",
type_id=layout.FIELD_TYPEID, view_id="Field_view", entity_id=common_vals["field_id"]
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url(action="edit",
coll_id="testcoll", type_id=layout.FIELD_TYPEID, entity_id=common_vals["field_ref_id"],
view_id="Field_view"
)
self.assertIn(v, r['location'])
w = "Created%%20reference%%20to%%20field%%20'%(field_id)s'"%common_vals
self.assertIn(w, r['location'])
# Check content of type, view and list
tgt_field_id = "%(field_id)s"%common_vals
tgt_field_uri = "%(property_uri)s"%common_vals
ref_field_id = tgt_field_id + layout.SUFFIX_MULTI
ref_field_uri = "%(property_uri)s"%(common_vals) + layout.SUFFIX_MULTI_P
expect_field_values = (
{ "annal:id": tgt_field_id
, "annal:type": "annal:Field"
, "rdfs:label": "%(field_label)s"%common_vals
, "annal:field_render_type": "_enum_render_type/Text"
, "annal:field_value_mode": "_enum_value_mode/Value_direct"
, "annal:field_entity_type": "%(type_uri)s"%common_vals
, "annal:field_value_type": "annal:Text"
, "annal:property_uri": tgt_field_uri
, "annal:field_placement": "small:0,12"
})
expect_ref_field_values = (
{ "annal:id": ref_field_id
, "annal:type": "annal:Field"
, "rdfs:label": message.FIELD_REF_LABEL%common_vals
, "annal:field_render_type": "_enum_render_type/RefMultifield"
, "annal:field_value_mode": "_enum_value_mode/Value_entity"
, "annal:field_entity_type": "%(type_uri)s"%common_vals
, "annal:field_value_type": "annal:Field_list"
, "annal:property_uri": ref_field_uri
| |
######################################################################################################################
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import boto3
from botocore.client import Config
import paramiko
from xml.dom import minidom
import ast
import time
import os
import string
import logging
# Logging level comes from the LOG_LEVEL environment variable; str() turns an
# unset variable (None) into 'NONE', which fails the whitelist check below and
# falls back to ERROR.
log_level = str(os.environ.get('LOG_LEVEL')).upper()
if log_level not in ['DEBUG', 'INFO','WARNING', 'ERROR','CRITICAL']:
    log_level = 'ERROR'
log = logging.getLogger()
log.setLevel(log_level)
# Name of the transit VPC configuration file in S3 ('None' if unset).
config_file=str(os.environ.get('CONFIG_FILE'))
# These S3 endpoint URLs are provided to support VPC endpoints for S3 in regions
# such as Frankfurt (eu-central-1) that require explicit region endpoint definition.
endpoint_url = {
  "us-east-1" : "https://s3.amazonaws.com",
  "us-east-2" : "https://s3-us-east-2.amazonaws.com",
  "us-west-1" : "https://s3-us-west-1.amazonaws.com",
  "us-west-2" : "https://s3-us-west-2.amazonaws.com",
  "eu-west-1" : "https://s3-eu-west-1.amazonaws.com",
  "eu-west-2" : "https://s3-eu-west-2.amazonaws.com",
  "eu-central-1" : "https://s3-eu-central-1.amazonaws.com",
  "ca-central-1" : "https://s3-ca-central-1.amazonaws.com",
  "ap-northeast-1" : "https://s3-ap-northeast-1.amazonaws.com",
  "ap-northeast-2" : "https://s3-ap-northeast-2.amazonaws.com",
  "ap-south-1" : "https://s3-ap-south-1.amazonaws.com",
  "ap-southeast-1" : "https://s3-ap-southeast-1.amazonaws.com",
  "ap-southeast-2" : "https://s3-ap-southeast-2.amazonaws.com",
  "sa-east-1" : "https://s3-sa-east-1.amazonaws.com"
}
#Logic to determine when the prompt has been discovered
def prompt(chan):
    """Read from the SSH channel until the router prompt ('#') appears.

    Accumulates everything received and returns it once the buffer ends
    with '#', which marks that the device has finished responding.
    """
    collected = ''
    while True:
        collected += chan.recv(9999)
        if collected.endswith('#'):
            return collected
# Logic to figure out the next availble tunnel
def getNextTunnelId(ssh):
    """Return the next available tunnel interface number on the router.

    Lists the existing Tunnel interfaces over the SSH channel and returns
    (highest tunnel number + 1), or 100 if no tunnels exist yet (tunnel
    numbering starts at 100 instead of the default 1).

    Raises Exception if the interface listing indicates the 199-tunnel
    limit has been reached.
    """
    log.debug('Start getNextTunnelId')
    ssh.send('term len 0\n')
    log.debug("%s", prompt(ssh))
    ssh.send('config t\n')
    log.debug("%s", prompt(ssh))
    ssh.send('do show int summary | include Tunnel\n')
    output = prompt(ssh)
    log.debug("%s", output)
    ssh.send('exit\n')
    log.debug("%s", prompt(ssh))
    lastTunnelNum = ''
    # BUG FIX: the original split on the two-character literal '`n' (a
    # PowerShell escape that leaked into the Python source). That string never
    # occurs in the output, so the limit check could never trigger. Split on
    # real newlines instead.
    if len(output.split('\n')) >= 199:
        log.error("Tunnel ID greater than 199")
        raise Exception
    for line in output.split('\n'):
        # 'show int summary' marks up interfaces with '* '; normalize it away.
        line = line.replace('* Tunnel', 'Tunnel')
        log.debug("%s", line)
        if line.strip()[:6] == 'Tunnel':
            lastTunnelNum = line.strip().partition(' ')[0].replace('Tunnel', '')
    # Add 100 modifier instead of default 1.
    if lastTunnelNum == '':
        return 100
    return int(lastTunnelNum) + 1
# Logic to figure out existing tunnel IDs
def getExistingTunnelId(ssh, vpn_connection_id):
    """Find the first tunnel number already configured for a VPN connection.

    Parses 'crypto keyring' lines of the form
    keyring-<vpn_connection_id>-<tunnelId> from the running configuration.
    Returns the first of the connection's two tunnel numbers, or 0 when no
    matching keyring is found.
    """
    log.debug('Start getExistingTunnelId')
    ssh.send('term len 0\n')
    log.debug("%s", prompt(ssh))
    # Display keyrings so we can derive the tunnel id.
    ssh.send('show run | include crypto keyring\n')
    keyring_output = prompt(ssh)
    log.debug("%s", keyring_output)
    highest = 0
    # Each matching line ends in '-<tunnelId>'; keep the greatest one seen.
    for entry in keyring_output.split('\n'):
        if vpn_connection_id not in entry:
            continue
        candidate = int(entry.split('-')[-1])
        if candidate > highest:
            highest = candidate
    if highest == 0:
        log.error('Unable to find existing tunnels for %s', vpn_connection_id)
        return 0
    # The loop kept the greater of the two tunnel numbers, so subtract one
    # to return the first tunnel number of the pair.
    return highest - 1
#Generic logic to push pre-generated Cisco config to the router
def pushConfig(ssh,config):
    """Push a pre-generated list of Cisco IOS config lines to the router.

    Each element of `config` is sent as one command line; the sentinel
    string "WAIT" causes a 30 second pause instead of sending anything.
    Finishes by saving the running config ('copy run start').
    """
    #log.info("Starting to push config")
    #ssh.send('term len 0\n')
    #prompt(ssh)
    ssh.send('config t\n')
    log.debug("%s",prompt(ssh))
    stime = time.time()
    for line in config:
        if line == "WAIT":
            log.debug("Waiting 30 seconds...")
            time.sleep(30)
        else:
            ssh.send(line+'\n')
            log.debug("%s",prompt(ssh))
    ssh.send('exit\n')
    log.debug("%s",prompt(ssh))
    log.debug(" --- %s seconds ---", (time.time() - stime))
    log.info("Saving config!")
    # NOTE(review): the trailing newlines presumably acknowledge the router's
    # confirmation prompts for 'copy run start' -- confirm on target device.
    ssh.send('copy run start\n\n\n\n\n')
    log.info("%s",prompt(ssh))
    log.info("Update complete!")
#Get a list of all objects that have not been processed for batch processesing of config files
#This is a departure from the original as that was triggered by S3 put events
#Uses simple boto call to s3 list_objects to generate a list of files to get config from
def getUnprocessedList(bucket_name, bucket_prefix):
    """List S3 object keys under the prefix that have not yet been processed.

    Keys containing the substring "processed" are skipped (they have already
    been handled and moved by moveToProcessed). Returns the remaining keys.
    """
    s3 = boto3.client(
        's3',
        config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    listing = s3.list_objects(Bucket=bucket_name, Prefix=bucket_prefix)
    log.info("Getting a list of all unprocessed items...")
    pending = []
    for entry in listing['Contents']:
        key = entry['Key']
        if "processed" in key:
            continue
        log.info("adding filename %s to unprocessed list", key)
        if key:
            pending.append(key)
    return pending
#In order to track progress and only process once, we need to move the objects to a new prefix
#This prevents the function above from adding it to the unprocessed list
#Use S3 copy_object and deletes the original once done as there is no move method available
def moveToProcessed(bucket_name,bucket_key,bucket_prefix_full,s3_url):
    """Move a processed config object under the 'processed/' prefix.

    S3 has no move operation, so the object is copied to
    <bucket_prefix_full>processed/<filename> and the original is deleted.
    """
    filename=bucket_key.split("/")[-1]
    dest_path=bucket_prefix_full + "processed/"
    # key_pth is the destination key; the copy below uses the equivalent
    # expression dest_path + filename.
    key_pth=dest_path + filename
    log.info("copying the config file to processed directory...")
    log.info("the bucket_key is - %s", bucket_key)
    log.info("the bucket prefix full is - %s", bucket_prefix_full)
    log.info("the processed file details are - %s ", filename)
    log.info("the dest_path is - %s", dest_path)
    log.info("the bucket_name is - %s",bucket_name)
    log.info("the CopySource is %s",bucket_name + "/" + bucket_key)
    log.info("the Key is %s",key_pth)
    sthree=boto3.client('s3', endpoint_url=s3_url,
        config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    sthree.copy_object(Bucket=bucket_name,
                       CopySource=bucket_name + "/" + bucket_key,
                       ServerSideEncryption='aws:kms', Key=dest_path + filename)
    log.info("deleting the processed config file...")
    sthree.delete_object(Bucket=bucket_name, Key=bucket_key)
    return
#Logic to determine the bucket prefix from the S3 key name that was provided
def getBucketPrefix(bucket_name, bucket_key):
    """Derive the bucket prefix from a known object key.

    Drops the last two path components of the key and returns the rest
    joined with '/', including a trailing '/'; returns '' when the key has
    fewer than three components.
    """
    components = bucket_key.split('/')[:-2]
    prefix = '/'.join(components)
    return prefix + '/' if prefix else prefix
#Logic to download the transit VPC configuration file from S3
def getTransitConfig(bucket_name, bucket_prefix, s3_url, config_file):
    """Download and parse the transit VPC configuration file from S3.

    The object body is evaluated with ast.literal_eval, so the stored file
    is expected to be a Python-literal dict (see putTransitConfig, which
    writes str(config)).
    """
    s3=boto3.client('s3', endpoint_url=s3_url,
        config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    log.info("Downloading config file: %s/%s/%s%s", s3_url, bucket_name, bucket_prefix,config_file)
    # NOTE(review): get_object(...)['Body'].read() returns bytes on Python 3;
    # ast.literal_eval accepts bytes input, but confirm target runtime.
    return ast.literal_eval(s3.get_object(Bucket=bucket_name,Key=bucket_prefix+config_file)['Body'].read())
#Logic to upload a new/updated transit VPC configuration file to S3 (not currently used)
def putTransitConfig(bucket_name, bucket_prefix, s3_url, config_file, config):
    """Upload a new/updated transit VPC configuration file to S3.

    The config object is serialized with str(), matching the
    ast.literal_eval parsing done by getTransitConfig. (Not currently used.)
    """
    client = boto3.client(
        's3', endpoint_url=s3_url,
        config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    log.info("Uploading new config file: %s/%s/%s%s", s3_url,bucket_name, bucket_prefix,config_file)
    client.put_object(Bucket=bucket_name, Key=bucket_prefix + config_file, Body=str(config))
#Logic to download the SSH private key from S3 to be used for SSH public key authentication
#Have left this code in, however, it is not used currently
def downloadPrivateKey(bucket_name, bucket_prefix, s3_url, prikey):
    """Download the SSH private key from S3 for public-key authentication.

    The key is written to /tmp/<prikey>, replacing any stale copy.
    (Kept for completeness; not currently used.)
    """
    local_copy = '/tmp/' + prikey
    if os.path.exists(local_copy):
        os.remove(local_copy)
    client = boto3.client(
        's3', endpoint_url=s3_url,
        config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    log.info("Downloading private key: %s/%s/%s%s",s3_url, bucket_name, bucket_prefix, prikey)
    client.download_file(bucket_name, bucket_prefix + prikey, local_copy)
#Logic to create the appropriate Cisco configuration
#Bespoke updates to cisco config...
def create_cisco_config(bucket_name, bucket_key, s3_url, bgp_asn, ssh):
    """Build the Cisco IOS configuration for one AWS VPN connection.

    Downloads the VPN configuration XML document from S3, extracts the
    transit-VPC and per-tunnel parameters, and returns a list of config
    lines for pushConfig. For 'create' the next free tunnel pair is used;
    otherwise the connection's existing tunnel pair is looked up. Deletes
    are not implemented yet and raise Exception.
    """
    log.info("Processing %s/%s", bucket_name, bucket_key)
    #Download the VPN configuration XML document
    s3=boto3.client('s3',endpoint_url=s3_url,
        config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
    config=s3.get_object(Bucket=bucket_name,Key=bucket_key)
    xmldoc=minidom.parseString(config['Body'].read())
    #Extract transit_vpc_configuration values
    vpn_config = xmldoc.getElementsByTagName("transit_vpc_config")[0]
    account_id = vpn_config.getElementsByTagName("account_id")[0].firstChild.data
    vpn_endpoint = vpn_config.getElementsByTagName("vpn_endpoint")[0].firstChild.data
    vpn_status = vpn_config.getElementsByTagName("status")[0].firstChild.data
    preferred_path = vpn_config.getElementsByTagName("preferred_path")[0].firstChild.data
    #Extract VPN connection information
    vpn_connection=xmldoc.getElementsByTagName('vpn_connection')[0]
    vpn_connection_id=vpn_connection.attributes['id'].value
    customer_gateway_id=vpn_connection.getElementsByTagName("customer_gateway_id")[0].firstChild.data
    vpn_gateway_id=vpn_connection.getElementsByTagName("vpn_gateway_id")[0].firstChild.data
    vpn_connection_type=vpn_connection.getElementsByTagName("vpn_connection_type")[0].firstChild.data
    #Determine the VPN tunnels to work with
    if vpn_status == 'create':
        tunnelId=getNextTunnelId(ssh)
    else:
        tunnelId=getExistingTunnelId(ssh,vpn_connection_id)
        if tunnelId == 0:
            return
    log.info("%s %s with tunnel #%s and #%s.",vpn_status, vpn_connection_id, tunnelId, tunnelId+1)
    # Create or delete the VRF for this connection
    if vpn_status == 'delete':
        # Delete support is intentionally disabled; the original delete logic
        # is kept below (commented out) for reference.
        log.info("we're not doing deletes yet")
        raise Exception
        #ipsec_tunnel = vpn_connection.getElementsByTagName("ipsec_tunnel")[0]
        #customer_gateway=ipsec_tunnel.getElementsByTagName("customer_gateway")[0]
        #customer_gateway_bgp_asn=customer_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("asn")[0].firstChild.data
        ##Remove VPN configuration for both tunnels
        #config_text = ['router bgp {}'.format(customer_gateway_bgp_asn)]
        #config_text.append(' no address-family ipv4 vrf {}'.format(vpn_connection_id))
        #config_text.append('exit')
        #config_text.append('no ip vrf {}'.format(vpn_connection_id))
        #config_text.append('interface Tunnel{}'.format(tunnelId))
        #config_text.append(' shutdown')
        #config_text.append('exit')
        #config_text.append('no interface Tunnel{}'.format(tunnelId))
        #config_text.append('interface Tunnel{}'.format(tunnelId+1))
        #config_text.append(' shutdown')
        #config_text.append('exit')
        #config_text.append('no interface Tunnel{}'.format(tunnelId+1))
        #config_text.append('no route-map rm-{} permit'.format(vpn_connection_id))
        ## Cisco requires waiting 60 seconds before removing the isakmp profile
        #config_text.append('WAIT')
        #config_text.append('WAIT')
        #config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId))
        #config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId+1))
        #config_text.append('no crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId))
        #config_text.append('no crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId+1))
    else:
        # Create global tunnel configuration
        config_text = ['ip vrf {}'.format(vpn_connection_id)]
        config_text.append(' rd {}:{}'.format(bgp_asn, tunnelId))
        config_text.append(' route-target export {}:100'.format(bgp_asn))
        config_text.append(' route-target import {}:200'.format(bgp_asn))
        config_text.append('exit')
        # Check to see if a route map is needed for creating a preferred path
        if preferred_path != 'none':
            config_text.append('route-map rm-{} permit'.format(vpn_connection_id))
            # If the preferred path is this transit VPC vpn endpoint, then set a shorter as-path prepend than if it is not
            if preferred_path == vpn_endpoint:
                config_text.append(' set as-path prepend {}'.format(bgp_asn))
            else:
                config_text.append(' set as-path prepend {} {}'.format(bgp_asn, bgp_asn))
            config_text.append('exit')
        # Create tunnel specific configuration (one ipsec_tunnel element per tunnel of the pair)
        for ipsec_tunnel in vpn_connection.getElementsByTagName("ipsec_tunnel"):
            customer_gateway=ipsec_tunnel.getElementsByTagName("customer_gateway")[0]
            customer_gateway_tunnel_outside_address=customer_gateway.getElementsByTagName("tunnel_outside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_ip_address=customer_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_mask=customer_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_mask")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_cidr=customer_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_cidr")[0].firstChild.data
            customer_gateway_bgp_asn=customer_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            customer_gateway_bgp_hold_time=customer_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data
            vpn_gateway=ipsec_tunnel.getElementsByTagName("vpn_gateway")[0]
            vpn_gateway_tunnel_outside_address=vpn_gateway.getElementsByTagName("tunnel_outside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_ip_address=vpn_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_mask=vpn_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_mask")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_cidr=vpn_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_cidr")[0].firstChild.data
            vpn_gateway_bgp_asn=vpn_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            vpn_gateway_bgp_hold_time=vpn_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data
            ike=ipsec_tunnel.getElementsByTagName("ike")[0]
            ike_authentication_protocol=ike.getElementsByTagName("authentication_protocol")[0].firstChild.data
            ike_encryption_protocol=ike.getElementsByTagName("encryption_protocol")[0].firstChild.data
            ike_lifetime=ike.getElementsByTagName("lifetime")[0].firstChild.data
            ike_perfect_forward_secrecy=ike.getElementsByTagName("perfect_forward_secrecy")[0].firstChild.data
            ike_mode=ike.getElementsByTagName("mode")[0].firstChild.data
            ike_pre_shared_key=ike.getElementsByTagName("pre_shared_key")[0].firstChild.data
            ipsec=ipsec_tunnel.getElementsByTagName("ipsec")[0]
            ipsec_protocol=ipsec.getElementsByTagName("protocol")[0].firstChild.data
            ipsec_authentication_protocol=ipsec.getElementsByTagName("authentication_protocol")[0].firstChild.data
            ipsec_encryption_protocol=ipsec.getElementsByTagName("encryption_protocol")[0].firstChild.data
            ipsec_lifetime=ipsec.getElementsByTagName("lifetime")[0].firstChild.data
            ipsec_perfect_forward_secrecy=ipsec.getElementsByTagName("perfect_forward_secrecy")[0].firstChild.data
            ipsec_mode=ipsec.getElementsByTagName("mode")[0].firstChild.data
            ipsec_clear_df_bit=ipsec.getElementsByTagName("clear_df_bit")[0].firstChild.data
            ipsec_fragmentation_before_encryption=ipsec.getElementsByTagName("fragmentation_before_encryption")[0].firstChild.data
            ipsec_tcp_mss_adjustment=ipsec.getElementsByTagName("tcp_mss_adjustment")[0].firstChild.data
            ipsec_dead_peer_detection_interval=ipsec.getElementsByTagName("dead_peer_detection")[0].getElementsByTagName("interval")[0].firstChild.data
            ipsec_dead_peer_detection_retries=ipsec.getElementsByTagName("dead_peer_detection")[0].getElementsByTagName("retries")[0].firstChild.data
            config_text.append('crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId))
            config_text.append(' local-address GigabitEthernet1')
            config_text.append(' pre-shared-key address {} key {}'.format(vpn_gateway_tunnel_outside_address, ike_pre_shared_key))
            config_text.append('exit')
            config_text.append('crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId))
            config_text.append(' local-address GigabitEthernet1')
            config_text.append(' match identity address {}'.format(vpn_gateway_tunnel_outside_address))
            config_text.append(' keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId))
            config_text.append('exit')
            config_text.append('interface Tunnel{}'.format(tunnelId))
            config_text.append(' description {} from {} to {} for account {}'.format(vpn_connection_id, vpn_gateway_id, customer_gateway_id, account_id))
            config_text.append(' bandwidth 1000000')
            config_text.append(' ip mtu 1340')
            config_text.append(' ip tcp adjust-mss 1300')
            config_text.append(' ip vrf forwarding {}'.format(vpn_connection_id))
            config_text.append(' ip address {} 255.255.255.252'.format(customer_gateway_tunnel_inside_address_ip_address))
            config_text.append(' ip virtual-reassembly')
            config_text.append(' tunnel source GigabitEthernet1')
            config_text.append(' tunnel destination {} '.format(vpn_gateway_tunnel_outside_address))
            config_text.append(' tunnel mode ipsec ipv4')
            config_text.append(' tunnel protection ipsec profile ipsec-vpn-aws')
            config_text.append(' ip tcp adjust-mss 1387')
            config_text.append(' no shutdown')
            config_text.append('exit')
            config_text.append('router bgp {}'.format(customer_gateway_bgp_asn))
            config_text.append(' address-family ipv4 vrf {}'.format(vpn_connection_id))
            config_text.append(' neighbor {} remote-as {}'.format(vpn_gateway_tunnel_inside_address_ip_address, vpn_gateway_bgp_asn))
            config_text.append(' neighbor {} maximum-prefix 2'.format(vpn_gateway_tunnel_inside_address_ip_address))
            if preferred_path != 'none':
                config_text.append(' neighbor {} route-map rm-{} out'.format(vpn_gateway_tunnel_inside_address_ip_address, vpn_connection_id))
            config_text.append(' neighbor {} timers 10 30 30'.format(vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(' neighbor {} activate'.format(vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(' neighbor {} as-override'.format(vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(' neighbor {} soft-reconfiguration inbound'.format(vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(' neighbor {} next-hop-self'.format(vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('exit')
            config_text.append('exit')
            #Increment tunnel ID for going onto the next tunnel
            tunnelId+=1
    log.debug("Conversion complete")
    return config_text
def lambda_handler(event, context):
'''
Main lambda handler which has been | |
file's information.
Positional arguments:
executable_directory (str) -- the file path to temporary directory's 'Scripts' directory
executable (str) -- the filename of the script to add to paths.json
"""
with open(os.path.join(executable_directory, executable), 'rb') as script_file:
script_file_contents = script_file.read()
new_path = {"_path": "Scripts/{}" .format(executable),
"path_type": "hardlink",
"sha256": hashlib.sha256(script_file_contents).hexdigest(),
"size_in_bytes": os.path.getsize(
os.path.join(executable_directory, executable))
}
return new_path
def update_paths_file(temp_dir, target_platform):
    """Update the paths.json file when converting between platforms.

    Positional arguments:
    temp_dir (str) -- the file path to the temporary directory containing the source
        package's extracted contents
    target_platform (str) -- the platform to target: 'unix' or 'win'
    """
    paths_file = os.path.join(temp_dir, 'info/paths.json')
    if not os.path.isfile(paths_file):
        return
    with open(paths_file) as file:
        paths = json.load(file)
    if target_platform == 'win':
        for path in paths['paths']:
            if path['_path'].startswith('lib'):
                path['_path'] = update_lib_path(path['_path'], 'win')
            elif path['_path'].startswith('bin'):
                path['_path'] = update_executable_path(temp_dir, path['_path'], 'win')
                path['sha256'] = update_executable_sha(temp_dir, path['_path'])
                path['size_in_bytes'] = update_executable_size(temp_dir, path['_path'])
            # Normalize any windows-style separators to forward slashes.
            path['_path'] = path['_path'].replace('\\', '/').replace('\\\\', '/')
        script_directory = os.path.join(temp_dir, 'Scripts')
        if os.path.isdir(script_directory):
            for script in os.listdir(script_directory):
                if script.endswith('.exe'):
                    paths['paths'].append(add_new_windows_path(script_directory, script))
    elif target_platform == 'unix':
        # BUG FIX: the original removed entries from paths['paths'] while
        # iterating over the same list, which silently skips the element
        # following each removal. Build a filtered list instead.
        kept = []
        for path in paths['paths']:
            if path['_path'].startswith('Lib'):
                path['_path'] = update_lib_path(path['_path'], 'unix', temp_dir)
            elif path['_path'].startswith('Scripts'):
                path['_path'] = update_executable_path(temp_dir, path['_path'], 'unix')
                path['sha256'] = update_executable_sha(temp_dir, path['_path'])
                path['size_in_bytes'] = update_executable_size(temp_dir, path['_path'])
            path['_path'] = path['_path'].replace('\\', '/').replace('\\\\', '/')
            # .bat/.exe launchers do not exist in unix packages; drop them.
            if not path['_path'].endswith(('.bat', '.exe')):
                kept.append(path)
        paths['paths'] = kept
    with open(paths_file, 'w') as file:
        json.dump(paths, file, indent=2)
def retrieve_executable_name(executable):
    """Return the executable's base name without its file extension.

    When converting between unix and windows, executables must be renamed
    without their extensions; this isolates the bare name.

    Positional arguments:
    executable (str) -- the executable path/filename including its extension
    """
    base_name = os.path.basename(executable)
    stem, _extension = os.path.splitext(base_name)
    return stem
def is_binary_file(directory, executable):
    """Return True if the file's first 1024 bytes contain non-text data.

    Used to avoid converting binary files. Returns False for missing files.
    Source: https://stackoverflow.com/questions/898669/

    Positional arguments:
    directory (str) -- the file path to the 'bin' or 'Scripts' directory
    executable (str) -- the name of the file to inspect
    """
    target = os.path.join(directory, executable)
    if not os.path.isfile(target):
        return False
    with open(target, 'rb') as handle:
        sample = handle.read(1024)
    # Bytes considered "texty": common control chars plus printable range,
    # minus DEL (0x7f). Anything left after stripping these marks a binary.
    texty = bytearray({7, 8, 9, 10, 12, 13, 27} | (set(range(0x20, 0x100)) - {0x7f}))
    return bool(sample.translate(None, texty))
def rename_executable(directory, executable, target_platform):
    """Rename an executable file when converting between platforms.

    unix -> windows: the file gains a '-script.py' suffix and its first line
    (the unix shebang) is stripped. windows -> unix: a '-script.py' file
    loses that suffix and gains the anaconda placeholder shebang.

    Positional arguments:
    directory (str) -- the file path to the 'bin' or 'Scripts' directory
    executable (str) -- the name of the executable to rename
    target_platform (str) -- the platform to target: 'unix' or 'win'
    """
    source_path = os.path.join(directory, executable)
    if target_platform == 'win':
        target_path = os.path.join(
            directory, '{}-script.py'.format(retrieve_executable_name(executable)))
        with open(source_path) as reader:
            body_lines = reader.read().splitlines()
        # Rewrite without the first line (the unix shebang), then rename.
        with open(source_path, 'w') as writer:
            for body_line in body_lines[1:]:
                writer.write(body_line + '\n')
        os.renames(source_path, target_path)
    elif source_path.endswith('.py'):
        target_path = source_path.replace('-script.py', '')
        with open(source_path) as reader:
            body_lines = reader.read().splitlines()
        # Prepend the placeholder shebang that conda relocates at install time.
        with open(source_path, 'w') as writer:
            writer.write('#!/opt/anaconda1anaconda2anaconda3/bin/python' + '\n')
            for body_line in body_lines:
                writer.write(body_line + '\n')
        os.renames(source_path, target_path)
def remove_executable(directory, executable):
    """Delete a windows launcher (.exe/.bat) from the 'Scripts' directory.

    When converting from windows to unix these launchers must be removed,
    since they do not exist in unix packages. Other files are left alone.

    Positional arguments:
    directory (str) -- the file path to the 'Scripts' directory
    executable (str) -- the filename of the executable to remove
    """
    if not executable.endswith(('.exe', '.bat')):
        return
    os.remove(os.path.join(directory, executable))
def create_exe_file(directory, executable, target_platform):
    """Create an .exe launcher for an executable during unix-to-windows conversion.

    Copies the bundled cli-32.exe or cli-64.exe template (shipped next to
    this module) to '<directory>/<executable>.exe'.

    Positional arguments:
    directory (str) -- the file path to the 'Scripts' directory
    executable (str) -- the executable name to create a launcher for
    target_platform (str) -- the platform to target: 'win-64' or 'win-32'
    """
    template_dir = os.path.dirname(__file__)
    arch = '32' if target_platform.endswith('32') else '64'
    template = os.path.join(template_dir, 'cli-{}.exe'.format(arch))
    destination = os.path.join(directory, '{}.exe'.format(executable))
    shutil.copyfile(template, destination)
def update_prefix_file(temp_dir, prefixes):
    """Rewrite the package's 'info/has_prefix' file with the given entries.

    Positional arguments:
    temp_dir (str) -- the file path to the temporary directory containing the source
        package's extracted contents
    prefixes (Iterable[str]) -- the prefix lines to write (each already newline-terminated)
    """
    target = os.path.join(temp_dir, 'info/has_prefix')
    with open(target, 'w+') as handle:
        for entry in prefixes:
            handle.write(entry)
def update_files_file(temp_dir, verbose):
    """Rewrite the package's 'info/files' file from the extracted contents.

    Walks the extracted tree, normalizes separators to '/', filters out
    info-metadata entries, and writes the sorted result one path per line.

    Positional arguments:
    temp_dir (str) -- the file path to the temporary directory containing the source
        package's extracted contents
    verbose (bool) -- show output of items that are updated
    """
    files_file = os.path.join(temp_dir, 'info/files')
    collected = []
    for dirpath, dirnames, filenames in walk(temp_dir):
        relative_dir = os.path.relpath(dirpath, temp_dir)
        candidates = [os.path.join(relative_dir, name) for name in filenames]
        for candidate in filter_info_files(candidates, ''):
            collected.append(candidate.replace('\\', '/').replace('\\\\', '/'))
            if verbose:
                print('Updating {}'.format(candidate))
    with open(files_file, 'w') as handle:
        for entry in sorted(collected):
            handle.write(entry + '\n')
def create_target_archive(file_path, temp_dir, platform, output_dir):
    """Create the converted package's .tar.bz2 under '<output_dir>/<platform>/'.

    Positional arguments:
    file_path (str) -- the file path to the source package's tar file
    temp_dir (str) -- the file path to the temporary directory containing the source
        package's extracted contents
    platform (str) -- the platform to convert to: 'win-64', 'win-32', 'linux-64',
        'linux-32', or 'osx-64'
    output_dir (str) -- the directory the converted archive is written under
    """
    destination_dir = os.path.join(output_dir, platform)
    if not os.path.isdir(destination_dir):
        os.makedirs(destination_dir)
    archive_path = os.path.join(destination_dir, os.path.basename(file_path))
    with tarfile.open(archive_path, 'w:bz2') as archive:
        for dirpath, dirnames, filenames in walk(temp_dir):
            relative_dir = os.path.relpath(dirpath, temp_dir)
            for name in filenames:
                member = os.path.join(relative_dir, name)
                archive.add(os.path.join(temp_dir, member), arcname=member)
def convert_between_unix_platforms(file_path, output_dir, platform, dependencies, verbose):
    """Convert a package between unix platforms (index update only).

    Positional arguments:
    file_path (str) -- the file path to the source package's tar file
    output_dir (str) -- the file path to where to output the converted tar file
    platform (str) -- the platform to convert to: 'linux-64', 'linux-32', or 'osx-64'
    dependencies (List[str]) -- the dependencies passed from the command line
    verbose (bool) -- show output of items that are updated
    """
    extracted_dir = extract_temporary_directory(file_path)
    update_index_file(extracted_dir, platform, dependencies, verbose)
    create_target_archive(file_path, extracted_dir, platform, output_dir)
    # tempfile.mkdtemp does not clean up after itself; remove it explicitly.
    shutil.rmtree(extracted_dir)
def convert_between_windows_architechtures(file_path, output_dir, platform,
                                           dependencies, verbose):
    """Convert a package between windows architectures (index update only).

    Positional arguments:
    file_path (str) -- the file path to the source package's tar file
    output_dir (str) -- the file path to where to output the converted tar file
    platform (str) -- the platform to convert to: 'win-64' or 'win-32'
    dependencies (List[str]) -- the dependencies passed from the command line
    verbose (bool) -- show output of items that are updated
    """
    extracted_dir = extract_temporary_directory(file_path)
    update_index_file(extracted_dir, platform, dependencies, verbose)
    create_target_archive(file_path, extracted_dir, platform, output_dir)
    # tempfile.mkdtemp does not clean up after itself; remove it explicitly.
    shutil.rmtree(extracted_dir)
def convert_from_unix_to_windows(file_path, output_dir, platform, dependencies, verbose):
    """Convert a package from a unix platform to windows.

    Rewrites 'lib' to windows layout, renames 'bin' scripts to
    '<name>-script.py' with matching .exe launchers under 'Scripts', and
    regenerates the package metadata (index, has_prefix, paths.json, files)
    before re-archiving.

    Positional arguments:
    file_path (str) -- the file path to the source package's tar file
    output_dir (str) -- the file path to where to output the converted tar file
    platform (str) -- the platform to convert to: 'win-64' or 'win-32'
    dependencies (List[str]) -- the dependencies passed from the command line
    verbose (bool) -- show output of items that are updated
    """
    temp_dir = extract_temporary_directory(file_path)
    prefixes = set()
    for entry in os.listdir(temp_dir):
        directory = os.path.join(temp_dir, entry)
        if os.path.isdir(directory) and entry.strip(os.sep) == 'lib':
            update_lib_contents(directory, temp_dir, 'win', file_path)
        if os.path.isdir(directory) and entry.strip(os.sep) == 'bin':
            # Only plain text scripts are renamed; binaries and dotfiles are
            # left untouched.
            for script in os.listdir(directory):
                if (os.path.isfile(os.path.join(directory, script)) and
                    not is_binary_file(directory, script) and
                    not script.startswith('.')):
                    rename_executable(directory, script, 'win')
                    create_exe_file(directory, retrieve_executable_name(script),
                                    platform)
                    prefixes.add('/opt/anaconda1anaconda2anaconda3 text Scripts/{}-script.py\n'
                                 .format(retrieve_executable_name(script)))
            new_bin_path = os.path.join(temp_dir, 'Scripts')
            os.renames(directory, new_bin_path)
    update_index_file(temp_dir, platform, dependencies, verbose)
    update_prefix_file(temp_dir, prefixes)
    update_paths_file(temp_dir, target_platform='win')
    update_files_file(temp_dir, verbose)
    create_target_archive(file_path, temp_dir, platform, output_dir)
    # we need to manually remove the temporary directory created by tempfile.mkdtemp
    shutil.rmtree(temp_dir)
def convert_from_windows_to_unix(file_path, output_dir, platform, dependencies, verbose):
"""Convert a package from windows to a unix platform.
Positional arguments:
file_path (str) -- the file path to the source package's tar file
output_dir (str) -- the file path to where to output the converted tar file
platform (str) -- the platform to convert to: 'linux-64', 'linux-32', or | |
# luxonis/Factory-calibration-DepthAI -- python3_ws/src/calibration/scripts/depthai_calibration.py
#!/usr/bin/env python3
import cv2
import sys
import copy
import platform
import signal
import subprocess
import json
import csv
import time
import numpy as np
import os
from pathlib import Path
import shutil
from datetime import datetime, timedelta
import rospy
from std_msgs.msg import String
from calibration.srv import Capture
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
import depthai as dai
import consts.resource_paths
from depthai_helpers.calibration_utils import *
from depthai_helpers.pygame_checkbox import Checkbox, pygame_render_text
import pygame
from pygame.locals import *
from depthai_helpers import utils
# Pin the pygame/SDL window position on screen.
os.environ['SDL_VIDEO_WINDOW_POS'] = '100,50'
# True when running on an ARM board (e.g. embedded calibration rig).
on_embedded = platform.machine().startswith(
    'arm') or platform.machine().startswith('aarch64')
# RGB color triples used by the pygame UI.
white = [255, 255, 255]
orange = [143, 122, 4]
red = [230, 9, 9]
green = [4, 143, 7]
black = [0, 0, 0]
pygame.init()
# Map human-readable camera names to DepthAI board sockets; both the legacy
# RGB/LEFT/RIGHT aliases and the CAM_x names resolve to the same sockets.
stringToCam = {
                'RGB' : dai.CameraBoardSocket.CAM_A,
                'LEFT' : dai.CameraBoardSocket.CAM_B,
                'RIGHT' : dai.CameraBoardSocket.CAM_C,
                'CAM_A' : dai.CameraBoardSocket.CAM_A,
                'CAM_B' : dai.CameraBoardSocket.CAM_B,
                'CAM_C' : dai.CameraBoardSocket.CAM_C,
                'CAM_D' : dai.CameraBoardSocket.CAM_D,
                'CAM_E' : dai.CameraBoardSocket.CAM_E,
                'CAM_F' : dai.CameraBoardSocket.CAM_F,
                'CAM_G' : dai.CameraBoardSocket.CAM_G,
                'CAM_H' : dai.CameraBoardSocket.CAM_H
                }
# NOTE(review): CAM_A/CAM_B/CAM_C appear twice as keys below; Python keeps the
# last value per key, so lookups always yield 'CAM_A'/'CAM_B'/'CAM_C' and the
# 'RGB'/'LEFT'/'RIGHT' entries are dead -- confirm whether that is intended.
CamToString = {
                dai.CameraBoardSocket.CAM_A : 'RGB' ,
                dai.CameraBoardSocket.CAM_B : 'LEFT' ,
                dai.CameraBoardSocket.CAM_C : 'RIGHT',
                dai.CameraBoardSocket.CAM_A : 'CAM_A',
                dai.CameraBoardSocket.CAM_B : 'CAM_B',
                dai.CameraBoardSocket.CAM_C : 'CAM_C',
                dai.CameraBoardSocket.CAM_D : 'CAM_D',
                dai.CameraBoardSocket.CAM_E : 'CAM_E',
                dai.CameraBoardSocket.CAM_F : 'CAM_F',
                dai.CameraBoardSocket.CAM_G : 'CAM_G',
                dai.CameraBoardSocket.CAM_H : 'CAM_H'
                }
# Per-sensor default resolutions for mono and color pipelines.
camToMonoRes = {
                'OV7251' : dai.MonoCameraProperties.SensorResolution.THE_480_P,
                'OV9*82' : dai.MonoCameraProperties.SensorResolution.THE_800_P,
                }
camToRgbRes = {
                'IMX378' : dai.ColorCameraProperties.SensorResolution.THE_4_K,
                'IMX214' : dai.ColorCameraProperties.SensorResolution.THE_4_K,
                'OV9*82' : dai.ColorCameraProperties.SensorResolution.THE_800_P
                }
class depthai_calibration_node:
def __init__(self, depthai_args):
self.package_path = depthai_args['package_path']
self.args = depthai_args
self.bridge = CvBridge()
self.is_service_active = False
self.disp = pygame.display
self.screen = self.disp.set_mode((800, 600))
self.screen.fill(white)
self.disp.set_caption("Calibration - Device check ")
# self.focus_value = 0
# self.defaultLensPosition = 135
self.focusSigmaThreshold = 30
# if self.rgbCcm == 'Sunny':
# self.focus_value = 135
# elif self.rgbCcm == 'KingTop':
# self.focus_value = 135
# elif self.rgbCcm == 'ArduCam':
# self.focus_value = 135
# self.frame_count = 0
self.init_time = time.time()
if self.args['board']:
board_path = Path(self.args['board'])
if not board_path.exists():
board_path = Path(consts.resource_paths.boards_dir_path) / \
Path(self.args['board'].upper()).with_suffix('.json')
if not board_path.exists():
raise ValueError(
'Board config not found: {}'.format(board_path))
with open(board_path) as fp:
self.board_config = json.load(fp)
self.board_config = self.board_config['board_config']
self.board_config_backup = self.board_config
self.aruco_dictionary = cv2.aruco.Dictionary_get(
cv2.aruco.DICT_4X4_1000)
self.ccm_selector()
# Connection checks ----------->
title = "Device Status"
pygame_render_text(self.screen, title, (350, 20), orange, 50)
self.auto_checkbox_names = []
self.auto_focus_checkbox_names = []
if self.args['usbMode']:
self.auto_checkbox_names.append("USB3")
header = ['time', 'Mx_serial_id']
for cam_id in self.board_config['cameras'].keys():
cam_info = self.board_config['cameras'][cam_id]
header.append(cam_info['name'] + '-CCM')
# header.append(cam_info['name'] + '-camera')
header.append(cam_info['name'] + '-focus-stdDev')
header.append(cam_info['name'] + '-Reprojection-Error')
self.auto_checkbox_names.append(cam_info['name'] + '-Camera-connected')
self.auto_checkbox_names.append(cam_info['name'] + '-Stream')
self.auto_focus_checkbox_names.append(cam_info['name'] + '-Focus')
if 'extrinsics' in cam_info:
if 'to_cam' in cam_info['extrinsics']:
right_cam = self.board_config['cameras'][cam_info['extrinsics']['to_cam']]['name']
header.append('Epipolar-error-' + cam_info['name'] + '-' + right_cam)
# ['Mono-CCM', 'RGB-CCM',
# 'left_camera', 'right_camera', 'rgb_camera',
# 'left_focus_stdDev', 'right_focus_stdDev', 'rgb_focus_stdDev',
# 'Epipolar error L-R', 'Epipolar error R-Rgb', 'RGB Reprojection Error']
log_file = self.args['log_path'] + "/calibration_logs_" + arg['board'] + ".csv"
if not os.path.exists(log_file):
with open(log_file, mode='w') as log_fopen:
log_csv_writer = csv.writer(log_fopen, delimiter=',')
log_csv_writer.writerow(header)
y = 110
x = 200
self.start_disp = False
font = pygame.font.Font(None, 20)
self.auto_checkbox_dict = {}
for i in range(len(self.auto_checkbox_names)):
w, h = font.size(self.auto_checkbox_names[i])
x_axis = x - w
y_axis = y + (40*i)
font_surf = font.render(self.auto_checkbox_names[i], True, green)
self.screen.blit(font_surf, (x_axis, y_axis))
self.auto_checkbox_dict[self.auto_checkbox_names[i]] = Checkbox(self.screen, x + 10, y_axis-5, outline_color=green,
check_color=green, check=False)
# text = 'call rosservice of device_status_handler to update the device status'
for i in range(len(self.auto_checkbox_names)):
self.auto_checkbox_dict[self.auto_checkbox_names[i]].render_checkbox()
y = y + (40*len(self.auto_checkbox_names))
self.auto_focus_checkbox_dict = {}
for i in range(len(self.auto_focus_checkbox_names)):
w, h = font.size(self.auto_focus_checkbox_names[i])
x_axis = x - w
y_axis = y + (40*i)
font_surf = font.render(self.auto_focus_checkbox_names[i], True, green)
self.screen.blit(font_surf, (x_axis, y_axis))
self.auto_focus_checkbox_dict[self.auto_focus_checkbox_names[i]] = Checkbox(self.screen, x + 10, y_axis-5, outline_color=green,
check_color=green, check=False)
for i in range(len(self.auto_focus_checkbox_names)):
self.auto_focus_checkbox_dict[self.auto_focus_checkbox_names[i]].render_checkbox()
pygame.draw.rect(self.screen, red, no_button)
pygame_render_text(self.screen, 'Exit', (500, 505))
self.no_active = False
self.click = False
# self.disp.update()
# creating services and publishers at the end to avoid calls before initialization
self.capture_srv = rospy.Service(
self.args["capture_service_name"], Capture, self.capture_servive_handler)
self.calib_srv = rospy.Service(
self.args["calibration_service_name"], Capture, self.calibration_servive_handler)
self.dev_status_srv = rospy.Service(
"device_status", Capture, self.device_status_handler)
self.focus_setting_srv = rospy.Service(
"rgb_focus_adjuster", Capture, self.camera_focus_adjuster)
# self.rgb_focus_srv = rospy.Service(
# "set_rgb_focus", Capture, self.rgb_focus_handler)
self.args['cameraModel'] = 'perspective'
self.imgPublishers = dict()
for cam_id in self.board_config['cameras']:
name = self.board_config['cameras'][cam_id]['name']
self.imgPublishers[name] = rospy.Publisher(name, Image, queue_size=10)
self.device = None
def ccm_selector(self):
title = "Select the mono Camera and RGB camera vendor"
pygame_render_text(self.screen, title, (70, 20), black, 40)
space = " "
title = ""
for camera in self.board_config['cameras'].keys():
title += self.board_config['cameras'][camera]['name'] + space
pygame_render_text(self.screen, title, (200, 100), green, 25)
ccm_names = ['Sunny', 'KingTop', 'ArduCam']
ccm_names_dict = dict()
y = 200
x = 110
font = pygame.font.Font(None, 30)
for i in range(len(ccm_names)):
w, h = font.size(ccm_names[i])
x_axis = x - w
y_axis = y + (60*i)
font_surf = font.render(ccm_names[i], True, black)
self.screen.blit(font_surf, (x_axis, y_axis))
ccm_names_dict[ccm_names[i]] = []
offset = 150
offset_increment = 1
for camera in self.board_config['cameras'].keys():
# ccm_names_dict[ccm_names[i]].append()
ccm_names_dict[ccm_names[i]].append(Checkbox(self.screen, x + (offset * offset_increment), y_axis-5, outline_color=green,
check_color=green, check=False, disable_pass = True))
ccm_names_dict[ccm_names[i]][-1].render_checkbox()
offset_increment += 1
# ccm_names_dict[ccm_names[i]].append(Checkbox(self.screen, x + 420, y_axis-5, outline_color=green,
# check_color=green, check=False, disable_pass = True))
# ccm_names_dict[ccm_names[i]][0].render_checkbox()
# ccm_names_dict[ccm_names[i]][1].render_checkbox()
fill_color = pygame.Rect(20, y_axis + 40, 750, 2)
pygame.draw.rect(self.screen, black, fill_color)
next_button = pygame.Rect(600, 430, 60, 35)
pygame.draw.rect(self.screen, orange, next_button)
pygame_render_text(self.screen, 'Next', (605, 440))
is_saved = False
self.monoCcm = None
self.rgbCcm = None
is_ccm_selected = []
self.ccm_selected = {}
for camera in self.board_config['cameras'].keys():
is_ccm_selected.append(False)
self.ccm_selected[self.board_config['cameras'][camera]['name']] = None
while not is_saved:
self.disp.update()
for event in pygame.event.get():
# catching NEXT button and checkboxes clicks
if event.type == pygame.MOUSEMOTION:
x, y = event.pos
px, py, w, h = next_button
if px < x < px + w and py < y < py + h:
active = True
else:
active = False
if event.type == pygame.MOUSEBUTTONDOWN:
click = True
if event.type == pygame.MOUSEBUTTONUP:
if active and not is_saved and click:
click = False
isAllChecked = True
for val in is_ccm_selected:
isAllChecked = isAllChecked and val
if isAllChecked:
is_saved = True
pygame.draw.rect(self.screen, green, next_button)
pygame_render_text(self.screen, 'Next', (605, 440))
else:
pygame_render_text(self.screen, "Select the type of module before clicking next ", (150, 480), red)
if active and is_saved and click:
click = False
pygame_render_text(self.screen, "Saving Selection", (605, 480), green)
ccm1 = ccm_names_dict[ccm_names[0]]
ccm2 = ccm_names_dict[ccm_names[1]]
ccm3 = ccm_names_dict[ccm_names[2]]
for i in range(len(list(self.board_config['cameras']))):
ccm1[i].update_checkbox_rel(event, ccm2[i], ccm3[i])
ccm2[i].update_checkbox_rel(event, ccm1[i], ccm3[i])
ccm3[i].update_checkbox_rel(event, ccm1[i], ccm2[i])
ccm1[i].render_checkbox()
ccm2[i].render_checkbox()
ccm3[i].render_checkbox()
if ccm1[i].is_checked() or ccm2[i].is_checked() or ccm3[i].is_checked():
is_ccm_selected[i] = True
camList = list(self.board_config['cameras'])
print("selected CCMS:")
for i in range(len(ccm_names)):
for j in range(len(camList)):
if ccm_names_dict[ccm_names[i]][j].is_checked():
self.ccm_selected[self.board_config['cameras'][camList[j]]['name']] = ccm_names[i]
print(self.board_config['cameras'][camList[j]]['name'], '=> ', ccm_names[i])
self.screen.fill(white)
self.disp.update()
def create_pipeline(self, camProperties):
pipeline = dai.Pipeline()
for cam_id in self.board_config['cameras']:
cam_info = self.board_config['cameras'][cam_id]
if cam_info['type'] == 'mono':
cam_node = pipeline.createMonoCamera()
xout = pipeline.createXLinkOut()
cam_node.setBoardSocket(stringToCam[cam_id])
cam_node.setResolution(camToMonoRes[cam_info['sensorName']])
cam_node.setFps(10)
xout.setStreamName(cam_info['name'])
cam_node.out.link(xout.input)
else:
cam_node = pipeline.createColorCamera()
xout = pipeline.createXLinkOut()
cam_node.setBoardSocket(stringToCam[cam_id])
cam_node.setResolution(camToRgbRes[cam_info['sensorName']])
cam_node.setFps(10)
xout.setStreamName(cam_info['name'])
cam_node.isp.link(xout.input)
if cam_info['hasAutofocus']:
controlIn = pipeline.createXLinkIn()
controlIn.setStreamName(cam_info['name'] + '-control')
controlIn.out.link(cam_node.inputControl)
return pipeline
def capture_exit(self):
is_clicked = False
for event in pygame.event.get():
# self.disp.update()
if event.type == pygame.MOUSEMOTION:
x, y = event.pos
px, py, w, h = no_button
if px < x < px + w and py < y < py + h:
self.no_active = True
pygame.draw.rect(self.screen, orange, no_button)
pygame_render_text(self.screen, 'Exit', (500, 505))
else:
self.no_active = False
pygame.draw.rect(self.screen, red, no_button)
pygame_render_text(self.screen, 'Exit', (500, 505))
if event.type == pygame.MOUSEBUTTONDOWN and self.no_active:
print("setting click")
self.click = True
if event.type == pygame.MOUSEBUTTONUP:
if self.no_active and self.click:
print("No clicked")
is_clicked = True
break
return is_clicked
def publisher(self):
while not rospy.is_shutdown():
if self.capture_exit():
print("signaling...")
rospy.signal_shutdown("Finished calibration")
# if self.start_disp:
self.disp.update()
if not self.is_service_active and self.device is not None and not self.device.isClosed():
for config_cam in self.board_config['cameras']:
cam_info = self.board_config['cameras'][config_cam]
frame = self.camera_queue[cam_info['name']].tryGet()
if frame is not None:
currFrame = None
if frame.getType() == dai.RawImgFrame.Type.RAW8:
currFrame = frame.getCvFrame()
else:
currFrame = cv2.cvtColor(frame.getCvFrame(), cv2.COLOR_BGR2GRAY)
self.imgPublishers[cam_info['name']].publish(
self.bridge.cv2_to_imgmsg(currFrame, "passthrough"))
def cvt_bgr(self, packet):
meta = packet.getMetadata()
w = meta.getFrameWidth()
h = meta.getFrameHeight()
packetData = packet.getData()
yuv420p = packetData.reshape((h * 3 // 2, w))
return cv2.cvtColor(yuv420p, cv2.COLOR_YUV2BGR_IYUV)
def parse_frame(self, frame, stream_name, file_name):
if frame is None:
print("Frame with stream name -> {} was None".format(stream_name))
return False
file_name += '.png'
# filename = image_filename(stream_name, self.current_polygon, self.images_captured)
print(self.package_path + "/dataset/{}/{}".format(stream_name, file_name))
ds_path = self.package_path + "/dataset/{}".format(stream_name)
if not os.path.exists(ds_path):
os.makedirs(ds_path)
cv2.imwrite(self.package_path +
"/dataset/{}/{}".format(stream_name, file_name), frame)
print("py: Saved image as: " + str(file_name) +
"in folder ->" + stream_name)
## adding backup
self.backup_ds(stream_name, file_name, frame)
return True
def retest(self):
# (extraction artifact removed; the following module originates from aditya-sengupta/tesscomp-prototyping)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A TaskManager which keeps track of which targets to process.
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import os
import sqlite3
import logging
import json
from . import STATUS, utilities
#--------------------------------------------------------------------------------------------------
class TaskManager(object):
"""
A TaskManager which keeps track of which targets to process.
"""
    def __init__(self, todo_file, cleanup=False, overwrite=False, cleanup_constraints=None,
        summary=None, summary_interval=100):
        """
        Initialize the TaskManager which keeps track of which targets to process.

        Parameters:
            todo_file (string): Path to the TODO-file (or a directory containing
                ``todo.sqlite``).
            cleanup (boolean, optional): Perform cleanup/optimization of TODO-file
                during initialization. Default=False.
            overwrite (boolean, optional): Restart calculation from the beginning,
                discarding any previous results. Default=False.
            cleanup_constraints (dict or list, optional): Constraints for cleanup of
                the status of previous correction runs. If not specified, all bad
                results are cleaned up.
            summary (string, optional): Path to file where to periodically write a
                progress summary. The output file will be in JSON format. Default=None.
            summary_interval (int, optional): Interval at which summary file is
                updated. Setting this to 1 will mean writing the file after every
                task completes. Default=100.

        Raises:
            FileNotFoundError: If TODO-file could not be found.
            ValueError: If ``cleanup_constraints`` is neither a dict nor a list.
        """
        self.overwrite = overwrite
        self.summary_file = summary
        self.summary_interval = summary_interval
        self.summary_counter = 0

        # Accept a directory and look for the canonical database name inside it:
        if os.path.isdir(todo_file):
            todo_file = os.path.join(todo_file, 'todo.sqlite')
        if not os.path.exists(todo_file):
            raise FileNotFoundError('Could not find TODO-file')
        if cleanup_constraints is not None and not isinstance(cleanup_constraints, (dict, list)):
            raise ValueError("cleanup_constraints should be dict or list")

        # Setup logging:
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        console = logging.StreamHandler()
        console.setFormatter(formatter)
        self.logger = logging.getLogger(__name__)
        # Guard against attaching duplicate handlers on repeated construction:
        if not self.logger.hasHandlers():
            self.logger.addHandler(console)
        self.logger.setLevel(logging.INFO)

        # Load the SQLite file:
        self.conn = sqlite3.connect(todo_file)
        self.conn.row_factory = sqlite3.Row
        self.cursor = self.conn.cursor()
        # EXCLUSIVE locking because a single TaskManager owns the file for the
        # duration of the run; TRUNCATE journal mode is restored to DELETE in close().
        self.cursor.execute("PRAGMA foreign_keys=ON;")
        self.cursor.execute("PRAGMA locking_mode=EXCLUSIVE;")
        self.cursor.execute("PRAGMA journal_mode=TRUNCATE;")

        # Reset the status of everything for a new run:
        if overwrite:
            self.cursor.execute("UPDATE todolist SET status=NULL;")
            self.cursor.execute("DROP TABLE IF EXISTS diagnostics;")
            self.cursor.execute("DROP TABLE IF EXISTS photometry_skipped;")
            self.conn.commit()

        # Create table for diagnostics:
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS diagnostics (
            priority INTEGER PRIMARY KEY ASC NOT NULL,
            lightcurve TEXT,
            method_used TEXT NOT NULL,
            elaptime REAL NOT NULL,
            worker_wait_time REAL,
            mean_flux DOUBLE PRECISION,
            variance DOUBLE PRECISION,
            variability DOUBLE PRECISION,
            rms_hour DOUBLE PRECISION,
            ptp DOUBLE PRECISION,
            pos_row REAL,
            pos_column REAL,
            contamination REAL,
            mask_size INTEGER,
            edge_flux REAL,
            stamp_width INTEGER,
            stamp_height INTEGER,
            stamp_resizes INTEGER,
            errors TEXT,
            FOREIGN KEY (priority) REFERENCES todolist(priority) ON DELETE CASCADE ON UPDATE CASCADE
        );""")
        # Records which target caused another target to be skipped:
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS photometry_skipped (
            priority INTEGER NOT NULL,
            skipped_by INTEGER NOT NULL,
            FOREIGN KEY (priority) REFERENCES todolist(priority) ON DELETE CASCADE ON UPDATE CASCADE,
            FOREIGN KEY (skipped_by) REFERENCES todolist(priority) ON DELETE RESTRICT ON UPDATE CASCADE
        );""")
        self.cursor.execute("CREATE UNIQUE INDEX IF NOT EXISTS diagnostics_lightcurve_idx ON diagnostics (lightcurve);")
        self.conn.commit()

        # Add status indicator for corrections to todolist, if it doesn't already exists:
        # This is only for backwards compatibility.
        self.cursor.execute("PRAGMA table_info(diagnostics)")
        existing_columns = [r['name'] for r in self.cursor.fetchall()]
        if 'edge_flux' not in existing_columns:
            self.logger.debug("Adding edge_flux column to diagnostics")
            self.cursor.execute("ALTER TABLE diagnostics ADD COLUMN edge_flux REAL DEFAULT NULL")
            self.conn.commit()
        if 'worker_wait_time' not in existing_columns:
            self.logger.debug("Adding worker_wait_time column to diagnostics")
            self.cursor.execute("ALTER TABLE diagnostics ADD COLUMN worker_wait_time REAL DEFAULT NULL")
            self.conn.commit()
        if 'method_used' not in existing_columns:
            # Since this one is NOT NULL, we have to do some magic to fill out the
            # new column after creation, by finding keywords in other columns.
            # This can be a pretty slow process, but it only has to be done once.
            self.logger.debug("Adding method_used column to diagnostics")
            self.cursor.execute("ALTER TABLE diagnostics ADD COLUMN method_used TEXT NOT NULL DEFAULT 'aperture';")
            for m in ('aperture', 'halo', 'psf', 'linpsf'):
                self.cursor.execute("UPDATE diagnostics SET method_used=? WHERE priority IN (SELECT priority FROM todolist WHERE method=?);", [m, m])
            self.cursor.execute("UPDATE diagnostics SET method_used='halo' WHERE method_used='aperture' AND errors LIKE '%Automatically switched to Halo photometry%';")
            self.conn.commit()
        if 'starid' in existing_columns:
            # Drop this column from the diagnostics table, since the information is already in
            # the todolist table. Use utility function for this, since SQLite does not
            # have a DROP COLUMN mechanism directly.
            utilities.sqlite_drop_column(self.conn, 'diagnostics', 'starid')

        # Reset calculations with status STARTED, ABORT or ERROR:
        # We are re-running all with error, in the hope that they will work this time around:
        clear_status = str(STATUS.STARTED.value) + ',' + str(STATUS.ABORT.value) + ',' + str(STATUS.ERROR.value)
        constraints = ['status IN (' + clear_status + ')']
        # Add additional constraints from the user input and build SQL query:
        if cleanup_constraints:
            if isinstance(cleanup_constraints, dict):
                # Copy so popping 'datasource' does not mutate the caller's dict:
                cc = cleanup_constraints.copy()
                if cc.get('datasource'):
                    constraints.append("datasource='ffi'" if cc.pop('datasource') == 'ffi' else "datasource!='ffi'")
                for key, val in cc.items():
                    if val is not None:
                        constraints.append(key + ' IN (%s)' % ','.join([str(v) for v in np.atleast_1d(val)]))
            else:
                constraints += cleanup_constraints
        constraints = ' AND '.join(constraints)
        self.cursor.execute("DELETE FROM diagnostics WHERE priority IN (SELECT todolist.priority FROM todolist WHERE " + constraints + ");")
        self.cursor.execute("UPDATE todolist SET status=NULL WHERE " + constraints + ";")
        self.conn.commit()

        # Analyze the tables for better query planning:
        self.cursor.execute("ANALYZE;")

        # Prepare summary object:
        self.summary = {
            'slurm_jobid': os.environ.get('SLURM_JOB_ID', None),
            'numtasks': 0,
            'tasks_run': 0,
            'last_error': None,
            'mean_elaptime': None,
            'mean_worker_waittime': None
        }
        # Make sure to add all the different status to summary:
        for s in STATUS:
            self.summary[s.name] = 0
        # If we are going to output summary, make sure to fill it up:
        if self.summary_file:
            # Extract information from database:
            self.cursor.execute("SELECT status,COUNT(*) AS cnt FROM todolist GROUP BY status;")
            for row in self.cursor.fetchall():
                self.summary['numtasks'] += row['cnt']
                if row['status'] is not None:
                    self.summary[STATUS(row['status']).name] = row['cnt']
            # Write summary to file:
            self.write_summary()

        # Run a cleanup/optimization of the database before we get started:
        if cleanup:
            self.logger.info("Cleaning TODOLIST before run...")
            try:
                # VACUUM cannot run inside a transaction, so switch to
                # autocommit while it executes:
                self.conn.isolation_level = None
                self.cursor.execute("VACUUM;")
            finally:
                self.conn.isolation_level = ''
#----------------------------------------------------------------------------------------------
    def close(self):
        """Close TaskManager and all associated objects."""
        if hasattr(self, 'cursor') and hasattr(self, 'conn'):
            try:
                # Discard anything uncommitted and restore the default journal
                # mode (set to TRUNCATE in __init__) before closing the cursor.
                self.conn.rollback()
                self.cursor.execute("PRAGMA journal_mode=DELETE;")
                self.conn.commit()
                self.cursor.close()
            except sqlite3.ProgrammingError:
                # Cursor/connection was already closed - nothing to do.
                pass
        if hasattr(self, 'conn'):
            self.conn.close()
        # Persist a final progress summary; write_summary is defined elsewhere
        # in this class (presumably a no-op when summary_file is None - verify).
        self.write_summary()
#----------------------------------------------------------------------------------------------
    def __enter__(self):
        """Enter context manager: return the TaskManager itself."""
        return self
#----------------------------------------------------------------------------------------------
    def __exit__(self, *args):
        """Exit context manager: close the database connection and tidy up."""
        self.close()
#----------------------------------------------------------------------------------------------
    def __del__(self):
        """Destructor: close the manager without writing a final summary."""
        # Disable the summary file first - presumably to avoid write_summary()
        # touching the filesystem during interpreter teardown (confirm).
        self.summary_file = None
        self.close()
#----------------------------------------------------------------------------------------------
def get_number_tasks(self, starid=None, camera=None, ccd=None, datasource=None, priority=None):
"""
Get number of tasks due to be processed.
Parameters:
priority (int, optional): Only return task matching this priority.
starid (int, optional): Only return tasks matching this starid.
camera (int, optional): Only return tasks matching this camera.
ccd (int, optional): Only return tasks matching this CCD.
datasource (str, optional): Only return tasks matching this datasource.
Returns:
int: Number of tasks due to be processed.
"""
constraints = []
if priority is not None:
constraints.append('todolist.priority=%d' % priority)
if starid is not None:
constraints.append('todolist.starid=%d' % starid)
if camera is not None:
constraints.append('todolist.camera=%d' % camera)
if ccd is not None:
constraints.append('todolist.ccd=%d' % ccd)
if datasource is not None:
constraints.append("todolist.datasource='ffi'" if datasource == 'ffi' else "todolist.datasource!='ffi'")
if constraints:
constraints = " AND " + " AND ".join(constraints)
else:
constraints = ''
self.cursor.execute("SELECT COUNT(*) AS num FROM todolist WHERE status IS NULL" + constraints + ";")
return int(self.cursor.fetchone()['num'])
#----------------------------------------------------------------------------------------------
def get_task(self, starid=None, camera=None, ccd=None, datasource=None, priority=None):
"""
Get next task to be processed.
Returns:
dict or None: Dictionary of settings for task.
"""
constraints = []
if priority is not None:
constraints.append('todolist.priority=%d' % priority)
if starid is not None:
constraints.append('todolist.starid=%d' % starid)
if camera is not None:
constraints.append('todolist.camera=%d' % camera)
if ccd is not None:
constraints.append('todolist.ccd=%d' % ccd)
if datasource is not None:
constraints.append("todolist.datasource='ffi'" if datasource == 'ffi' else "todolist.datasource!='ffi'")
if constraints:
constraints = " AND " + " AND ".join(constraints)
else:
constraints = ''
self.cursor.execute("SELECT priority,starid,method,sector,camera,ccd,datasource,tmag FROM todolist WHERE status IS NULL" + constraints + " ORDER BY priority LIMIT 1;")
task = self.cursor.fetchone()
if task:
return dict(task)
return None
#----------------------------------------------------------------------------------------------
def get_random_task(self):
"""
Get random task to be processed.
Returns:
dict or None: Dictionary of settings for task.
"""
self.cursor.execute("SELECT priority,starid,method,sector,camera,ccd,datasource,tmag FROM todolist WHERE status IS NULL ORDER BY RANDOM() LIMIT 1;")
task = self.cursor.fetchone()
if task:
return dict(task)
return None
#----------------------------------------------------------------------------------------------
    def start_task(self, taskid):
        """
        Mark a task as STARTED in the TODO-list.

        Parameters:
            taskid (int): Priority (primary key in todolist) of the task.
        """
        self.cursor.execute("UPDATE todolist SET status=? WHERE priority=?;", (STATUS.STARTED.value, taskid))
        self.conn.commit()
        # Keep the in-memory progress summary in sync with the database:
        self.summary['STARTED'] += 1
#----------------------------------------------------------------------------------------------
def save_result(self, result):
"""
Save results and diagnostics. This will update the TODO list.
Parameters:
results (dict): Dictionary of results and diagnostics.
"""
# Extract details dictionary:
details = result.get('details', {})
error_msg = details.get('errors', [])
# The status of this target returned by the photometry:
my_status = result['status']
# Also set status of targets that were marked as "SKIPPED" by this target:
if 'skip_targets' in details and len(details['skip_targets']) > 0:
skip_targets = set(details['skip_targets'])
if result['datasource'].startswith('tpf:') and int(result['datasource'][4:]) in skip_targets:
# This secondary target is in the mask of the primary target.
# We never want to return a lightcurve for a secondary target over
# a primary target, so we are going to mark this one as SKIPPED.
primary_tpf_target_starid = int(result['datasource'][4:])
self.cursor.execute("SELECT priority FROM todolist WHERE starid=? AND datasource='tpf' AND sector=? AND camera=? AND ccd=?;", (
primary_tpf_target_starid,
result['sector'],
result['camera'],
result['ccd']
))
primary_tpf_target_priority = self.cursor.fetchone()
# Mark the current star as SKIPPED and that it was caused by the primary:
self.logger.info("Changing status to SKIPPED for priority %s because it overlaps with primary target TIC %d", result['priority'], primary_tpf_target_starid)
my_status = STATUS.SKIPPED
if primary_tpf_target_priority is not None:
self.cursor.execute("INSERT INTO photometry_skipped (priority,skipped_by) VALUES (?,?);", (
result['priority'],
primary_tpf_target_priority[0]
))
else:
self.logger.warning("Could not find primary TPF target (TIC %d) for priority=%d", primary_tpf_target_starid, result['priority'])
error_msg.append("TargetNotFoundError: Could not find primary TPF target (TIC %d)" % primary_tpf_target_starid)
else:
# Create unique list of starids to be masked as skipped:
skip_starids = ','.join([str(starid) for starid in skip_targets])
# Ask the todolist if there are any stars that are brighter than this
# one among the other targets in the mask:
if result['datasource'] == 'tpf':
skip_datasources = "'tpf','tpf:%d'" % result['starid']
else:
skip_datasources = "'" + result['datasource'] + "'"
self.cursor.execute("SELECT priority,tmag FROM todolist WHERE starid IN (" + skip_starids + ") AND datasource IN (" + skip_datasources + ") AND sector=? AND camera=? AND ccd=?;", (
result['sector'],
result['camera'],
result['ccd']
))
skip_rows = self.cursor.fetchall()
if len(skip_rows) > 0:
skip_tmags = np.array([row['tmag'] for row in skip_rows])
if np.all(result['tmag'] < skip_tmags):
# This target was the brightest star in the mask,
# so let's keep it and simply mark all | |
# (extraction artifact removed; the following module originates from cdown/statsmodels)
from __future__ import division
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_allclose,
assert_array_less)
import statsmodels.api as sm
from .results.results_discrete import RandHIE
class CheckGeneric(object):
    """Shared checks comparing a fitted model (``res1``) against stored
    reference results (``res2``). Subclasses provide both in setup_class."""

    def test_params(self):
        assert_allclose(self.res1.params, self.res2.params, atol=1e-5, rtol=1e-5)

    def test_llf(self):
        assert_allclose(self.res1.llf, self.res2.llf, atol=1e-5, rtol=1e-5)

    def test_conf_int(self):
        assert_allclose(self.res1.conf_int(), self.res2.conf_int, atol=1e-3, rtol=1e-5)

    def test_bse(self):
        assert_allclose(self.res1.bse, self.res2.bse, atol=1e-3, rtol=1e-3)

    def test_aic(self):
        assert_allclose(self.res1.aic, self.res2.aic, atol=1e-2, rtol=1e-2)

    def test_bic(self):
        # BUG FIX: this previously compared res1.aic to res2.aic (copy-paste
        # from test_aic), so the BIC was never actually tested.
        assert_allclose(self.res1.bic, self.res2.bic, atol=1e-1, rtol=1e-1)

    def test_t(self):
        # t_test against the identity matrix must reproduce the tvalues.
        unit_matrix = np.identity(self.res1.params.size)
        t_test = self.res1.t_test(unit_matrix)
        assert_allclose(self.res1.tvalues, t_test.tvalue)

    def test_fit_regularized(self):
        model = self.res1.model

        alpha = np.ones(len(self.res1.params))
        # Leave the last two parameters unpenalized:
        alpha[-2:] = 0
        res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0, maxiter=500)

        assert_allclose(res_reg.params[2:], self.res1.params[2:],
            atol=5e-2, rtol=5e-2)

    def test_init_keys(self):
        init_kwds = self.res1.model._get_init_kwds()
        assert_equal(set(init_kwds.keys()), set(self.init_keys))
        for key, value in self.init_kwds.items():
            assert_equal(init_kwds[key], value)

    def test_null(self):
        # call llnull, so null model is attached, side effect of cached attribute
        self.res1.llnull
        # check model instead of value
        exog_null = self.res1.res_null.model.exog
        exog_infl_null = self.res1.res_null.model.exog_infl
        assert_array_equal(exog_infl_null.shape,
                           (len(self.res1.model.exog), 1))
        # The null-model design matrices must be constant columns:
        assert_equal(exog_null.ptp(), 0)
        assert_equal(exog_infl_null.ptp(), 0)

    def test_summary(self):
        # SMOKE test
        self.res1.summary()
class TestZeroInflatedModel_logit(CheckGeneric):
    """ZIP model with logit inflation, checked against stored RandHIE results."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load()
        cls.endog = data.endog
        exog = sm.add_constant(data.exog[:,1:4], prepend=False)
        exog_infl = sm.add_constant(data.exog[:,0], prepend=False)
        model = sm.ZeroInflatedPoisson(data.endog, exog, exog_infl=exog_infl,
                                       inflation='logit')
        cls.res1 = model.fit(method='newton', maxiter=500)
        # Attach the null model so the inherited llnull test has something to inspect.
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'logit'}
        reference = RandHIE()
        reference.zero_inflated_poisson_logit()
        cls.res2 = reference
class TestZeroInflatedModel_probit(CheckGeneric):
    """ZIP model with probit inflation, checked against stored RandHIE results."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load()
        cls.endog = data.endog
        exog = sm.add_constant(data.exog[:,1:4], prepend=False)
        exog_infl = sm.add_constant(data.exog[:,0], prepend=False)
        model = sm.ZeroInflatedPoisson(data.endog, exog, exog_infl=exog_infl,
                                       inflation='probit')
        cls.res1 = model.fit(method='newton', maxiter=500)
        # Attach the null model so the inherited llnull test has something to inspect.
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'probit'}
        reference = RandHIE()
        reference.zero_inflated_poisson_probit()
        cls.res2 = reference
class TestZeroInflatedModel_offset(CheckGeneric):
    """ZIP model with an offset, checked against stored RandHIE results."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load()
        cls.endog = data.endog
        exog = sm.add_constant(data.exog[:,1:4], prepend=False)
        exog_infl = sm.add_constant(data.exog[:,0], prepend=False)
        cls.res1 = sm.ZeroInflatedPoisson(data.endog, exog,
            exog_infl=exog_infl, offset=data.exog[:,7]).fit(method='newton', maxiter=500)
        # for llnull test
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'logit'}
        res2 = RandHIE()
        res2.zero_inflated_poisson_offset()
        cls.res2 = res2

    def test_exposure(self):
        # This tests mostly the equivalence of offset and exposure = exp(offset)
        # use data arrays from class model
        model1 = self.res1.model
        offset = model1.offset
        model3 = sm.ZeroInflatedPoisson(model1.endog, model1.exog,
            exog_infl=model1.exog_infl, exposure=np.exp(offset))
        res3 = model3.fit(start_params=self.res1.params,
                          method='newton', maxiter=500)

        assert_allclose(res3.params, self.res1.params, atol=1e-6, rtol=1e-6)
        fitted1 = self.res1.predict()
        # BUG FIX: this previously read `self.res1.predict()`, comparing the
        # offset model's fitted values with themselves and making the next
        # assertion vacuous; the exposure model's predictions are what must match.
        fitted3 = res3.predict()
        assert_allclose(fitted3, fitted1, atol=1e-6, rtol=1e-6)

        ex = model1.exog
        ex_infl = model1.exog_infl
        offset = model1.offset
        fitted1_0 = self.res1.predict(exog=ex, exog_infl=ex_infl,
                                      offset=offset)
        fitted3_0 = res3.predict(exog=ex, exog_infl=ex_infl,
                                 exposure=np.exp(offset))
        assert_allclose(fitted3_0, fitted1_0, atol=1e-6, rtol=1e-6)

        ex = model1.exog[:10:2]
        ex_infl = model1.exog_infl[:10:2]
        offset = offset[:10:2]
        # # TODO: this raises with shape mismatch,
        # # i.e. uses offset or exposure from model -> fix it or not?
        # GLM.predict to setting offset and exposure to zero
        # fitted1_1 = self.res1.predict(exog=ex, exog_infl=ex_infl)
        # fitted3_1 = res3.predict(exog=ex, exog_infl=ex_infl)
        # assert_allclose(fitted3_1, fitted1_1, atol=1e-6, rtol=1e-6)

        fitted1_2 = self.res1.predict(exog=ex, exog_infl=ex_infl,
                                      offset=offset)
        fitted3_2 = res3.predict(exog=ex, exog_infl=ex_infl,
                                 exposure=np.exp(offset))
        assert_allclose(fitted3_2, fitted1_2, atol=1e-6, rtol=1e-6)
        assert_allclose(fitted1_2, fitted1[:10:2], atol=1e-6, rtol=1e-6)
        assert_allclose(fitted3_2, fitted1[:10:2], atol=1e-6, rtol=1e-6)
class TestZeroInflatedModelPandas(CheckGeneric):
    """ZIP/logit model fitted from pandas inputs; also checks parameter naming."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load_pandas()
        cls.endog = data.endog
        cls.data = data
        exog = sm.add_constant(data.exog.iloc[:,1:4], prepend=False)
        exog_infl = sm.add_constant(data.exog.iloc[:,0], prepend=False)
        # we don't need to verify convergence here
        start_params = np.asarray([0.10337834587498942, -1.0459825102508549,
                                   -0.08219794475894268, 0.00856917434709146,
                                   -0.026795737379474334, 1.4823632430107334])
        model = sm.ZeroInflatedPoisson(data.endog, exog,
                                       exog_infl=exog_infl, inflation='logit')
        cls.res1 = model.fit(start_params=start_params, method='newton',
                             maxiter=500)
        # Attach the null model so the inherited llnull test has something to inspect.
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'logit'}
        reference = RandHIE()
        reference.zero_inflated_poisson_logit()
        cls.res2 = reference

    def test_names(self):
        # Inflation parameters carry the 'inflate_' prefix, followed by the
        # main-model parameters, in design-matrix order.
        expected = ['inflate_lncoins', 'inflate_const', 'idp', 'lpi',
                    'fmde', 'const']
        assert_array_equal(self.res1.model.exog_names, expected)
        assert_array_equal(self.res1.params.index.tolist(), expected)
        assert_array_equal(self.res1.bse.index.tolist(), expected)

        # With prepend=True the constants move to the front of each block.
        exog = sm.add_constant(self.data.exog.iloc[:,1:4], prepend=True)
        exog_infl = sm.add_constant(self.data.exog.iloc[:,0], prepend=True)
        expected = ['inflate_const', 'inflate_lncoins', 'const', 'idp',
                    'lpi', 'fmde']
        model = sm.ZeroInflatedPoisson(self.data.endog, exog,
                                       exog_infl=exog_infl, inflation='logit')
        assert_array_equal(model.exog_names, expected)
class TestZeroInflatedPoisson_predict(object):
    """Prediction sanity checks for a ZIP model fit on simulated data."""

    @classmethod
    def setup_class(cls):
        true_params = [1, 0.5]
        np.random.seed(123)
        n_obs = 200
        exog = np.ones((n_obs, 2))
        exog[:n_obs // 2, 1] = 2
        mu_true = np.dot(exog, true_params)
        cls.endog = sm.distributions.zipoisson.rvs(mu_true, 0.05,
                                                   size=mu_true.shape)
        cls.res = sm.ZeroInflatedPoisson(cls.endog, exog).fit(
            method='bfgs', maxiter=5000, maxfun=5000)

    def test_mean(self):
        # mean of predictions should match the sample mean
        predicted_mean = self.res.predict().mean()
        assert_allclose(predicted_mean, self.endog.mean(),
                        atol=1e-2, rtol=1e-2)

    def test_var(self):
        # model-implied variance = mean * dispersion factor
        implied_var = (self.res.predict().mean()
                       * self.res._dispersion_factor.mean())
        assert_allclose(implied_var, self.endog.var(), atol=5e-2, rtol=5e-2)

    def test_predict_prob(self):
        # predicted count probabilities match the zipoisson pmf at the DGP
        # inflation probability
        res = self.res
        counts = np.arange(7)[:, None]
        pr = res.predict(which='prob')
        pr2 = sm.distributions.zipoisson.pmf(counts, res.predict(), 0.05).T
        assert_allclose(pr, pr2, rtol=0.05, atol=0.05)
class TestZeroInflatedGeneralizedPoisson(CheckGeneric):
    """Zero-inflated Generalized Poisson (p=1) on RandHIE, compared against
    external reference results; also exercises extra optimizers."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load()
        cls.endog = data.endog
        # main-equation and inflation-equation design matrices (const appended)
        exog = sm.add_constant(data.exog[:,1:4], prepend=False)
        exog_infl = sm.add_constant(data.exog[:,0], prepend=False)
        cls.res1 = sm.ZeroInflatedGeneralizedPoisson(data.endog, exog,
            exog_infl=exog_infl, p=1).fit(method='newton', maxiter=500)
        # for llnull test
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset', 'p']
        cls.init_kwds = {'inflation': 'logit', 'p': 1}
        res2 = RandHIE()
        res2.zero_inflated_generalized_poisson()
        cls.res2 = res2

    def test_bse(self):
        # standard errors are not compared for this model; skip base check
        pass

    def test_conf_int(self):
        # confidence intervals are not compared; skip base check
        pass

    def test_bic(self):
        # reference BIC not available; skip base check
        pass

    def test_t(self):
        # t_test against the identity matrix must reproduce reported tvalues
        unit_matrix = np.identity(self.res1.params.size)
        t_test = self.res1.t_test(unit_matrix)
        assert_allclose(self.res1.tvalues, t_test.tvalue)

    def test_minimize(self):
        # check additional optimizers using the `minimize` option
        model = self.res1.model
        # use the same start_params, but avoid recomputing
        start_params = self.res1.mle_settings['start_params']

        res_ncg = model.fit(start_params=start_params,
                            method='minimize', min_method="trust-ncg",
                            maxiter=500, disp=0)
        assert_allclose(res_ncg.params, self.res2.params,
                        atol=1e-3, rtol=0.04)
        assert_allclose(res_ncg.bse, self.res2.bse,
                        atol=1e-3, rtol=0.6)
        assert_(res_ncg.mle_retvals['converged'] is True)

        res_dog = model.fit(start_params=start_params,
                            method='minimize', min_method="dogleg",
                            maxiter=500, disp=0)
        assert_allclose(res_dog.params, self.res2.params,
                        atol=1e-3, rtol=3e-3)
        assert_allclose(res_dog.bse, self.res2.bse,
                        atol=1e-3, rtol=0.6)
        assert_(res_dog.mle_retvals['converged'] is True)

        res_bh = model.fit(start_params=start_params,
                           method='basinhopping', maxiter=500,
                           niter_success=3, disp=0)
        assert_allclose(res_bh.params, self.res2.params,
                        atol=1e-4, rtol=3e-5)
        assert_allclose(res_bh.bse, self.res2.bse,
                        atol=1e-3, rtol=0.6)
        # skip, res_bh reports converged is false but params agree
        #assert_(res_bh.mle_retvals['converged'] is True)
class TestZeroInflatedGeneralizedPoisson_predict(object):
    """Prediction checks for zero-inflated Generalized Poisson on simulated
    data."""

    @classmethod
    def setup_class(cls):
        # DGP parameters: [beta0, beta1, alpha]
        expected_params = [1, 0.5, 0.5]
        np.random.seed(1234)
        nobs = 200
        exog = np.ones((nobs, 2))
        exog[:nobs//2, 1] = 2
        mu_true = exog.dot(expected_params[:-1])
        cls.endog = sm.distributions.zigenpoisson.rvs(mu_true,
            expected_params[-1], 2, 0.5, size=mu_true.shape)
        model = sm.ZeroInflatedGeneralizedPoisson(cls.endog, exog, p=2)
        cls.res = model.fit(method='bfgs', maxiter=5000, maxfun=5000)

    def test_mean(self):
        # mean of predictions should match the sample mean
        assert_allclose(self.res.predict().mean(), self.endog.mean(),
                        atol=1e-4, rtol=1e-4)

    def test_var(self):
        # model-implied variance = mean * dispersion factor
        assert_allclose((self.res.predict().mean() *
                         self.res._dispersion_factor.mean()),
                        self.endog.var(), atol=0.05, rtol=0.1)

    def test_predict_prob(self):
        res = self.res
        endog = res.model.endog
        pr = res.predict(which='prob')
        # NOTE(review): the data are generated from zigenpoisson but the
        # probabilities are checked against zinegbin.pmf; the loose
        # tolerances let this pass, but confirm whether zigenpoisson.pmf
        # was intended here.
        pr2 = sm.distributions.zinegbin.pmf(np.arange(12)[:,None],
            res.predict(), 0.5, 2, 0.5).T
        assert_allclose(pr, pr2, rtol=0.08, atol=0.05)
class TestZeroInflatedNegativeBinomialP(CheckGeneric):
    """Zero-inflated NB2 on RandHIE, compared with external reference results;
    also exercises regularized fit and extra optimizers."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load()
        cls.endog = data.endog
        # single regressor plus constant for the main and inflation equations
        exog = sm.add_constant(data.exog[:,1], prepend=False)
        exog_infl = sm.add_constant(data.exog[:,0], prepend=False)
        # cheating for now, parameters are not well identified in this dataset
        # see https://github.com/statsmodels/statsmodels/pull/3928#issuecomment-331724022
        sp = np.array([1.88, -10.28, -0.20, 1.14, 1.34])
        cls.res1 = sm.ZeroInflatedNegativeBinomialP(data.endog, exog,
            exog_infl=exog_infl, p=2).fit(start_params=sp, method='nm',
            xtol=1e-6, maxiter=5000)
        # for llnull test
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset', 'p']
        cls.init_kwds = {'inflation': 'logit', 'p': 2}
        res2 = RandHIE()
        res2.zero_inflated_negative_binomial()
        cls.res2 = res2

    def test_params(self):
        assert_allclose(self.res1.params, self.res2.params,
                        atol=1e-3, rtol=1e-3)

    def test_conf_int(self):
        # confidence intervals are not compared; skip base check
        pass

    def test_bic(self):
        # reference BIC not available; skip base check
        pass

    def test_fit_regularized(self):
        # L1 fit with the last two coefficients unpenalized should stay
        # close to the unregularized estimates for those coefficients
        model = self.res1.model
        alpha = np.ones(len(self.res1.params))
        alpha[-2:] = 0
        res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0, maxiter=500)
        assert_allclose(res_reg.params[2:], self.res1.params[2:],
                        atol=1e-1, rtol=1e-1)

    # possibly slow, adds 25 seconds
    def test_minimize(self):
        # check additional optimizers using the `minimize` option
        model = self.res1.model
        # use the same start_params, but avoid recomputing
        start_params = self.res1.mle_settings['start_params']

        res_ncg = model.fit(start_params=start_params,
                            method='minimize', min_method="trust-ncg",
                            maxiter=500, disp=0)
        assert_allclose(res_ncg.params, self.res2.params,
                        atol=1e-3, rtol=0.03)
        assert_allclose(res_ncg.bse, self.res2.bse,
                        atol=1e-3, rtol=0.06)
        assert_(res_ncg.mle_retvals['converged'] is True)

        res_dog = model.fit(start_params=start_params,
                            method='minimize', min_method="dogleg",
                            maxiter=500, disp=0)
        assert_allclose(res_dog.params, self.res2.params,
                        atol=1e-3, rtol=3e-3)
        assert_allclose(res_dog.bse, self.res2.bse,
                        atol=1e-3, rtol=7e-3)
        assert_(res_dog.mle_retvals['converged'] is True)

        res_bh = model.fit(start_params=start_params,
                           method='basinhopping', maxiter=500,
                           niter_success=3, disp=0)
        assert_allclose(res_bh.params, self.res2.params,
                        atol=1e-4, rtol=3e-4)
        assert_allclose(res_bh.bse, self.res2.bse,
                        atol=1e-3, rtol=1e-3)
        # skip, res_bh reports converged is false but params agree
        #assert_(res_bh.mle_retvals['converged'] is True)
class TestZeroInflatedNegativeBinomialP_predict(object):
    @classmethod
    def setup_class(cls):
        """Simulate a zero-inflated NB2 sample once and fit the model."""
        # DGP parameters: [beta0, beta1, alpha]
        expected_params = [1, 1, 0.5]
        np.random.seed(987123)
        nobs = 500
        exog = np.ones((nobs, 2))
        exog[:nobs//2, 1] = 0
        prob_infl = 0.15
        # log link: mean of the count process
        mu_true = np.exp(exog.dot(expected_params[:-1]))
        cls.endog = sm.distributions.zinegbin.rvs(mu_true,
            expected_params[-1], 2, prob_infl, size=mu_true.shape)
        model = sm.ZeroInflatedNegativeBinomialP(cls.endog, exog, p=2)
        cls.res = model.fit(method='bfgs', maxiter=5000, maxfun=5000)
        # attach others
        cls.prob_infl = prob_infl
def test_mean(self):
assert_allclose(self.res.predict().mean(), self.endog.mean(),
rtol=0.01)
def test_var(self):
# todo check precision
assert_allclose((self.res.predict().mean() *
self.res._dispersion_factor.mean()),
self.endog.var(), rtol=0.2)
    def test_predict_prob(self):
        """Predicted count probabilities match the zinegbin pmf and the
        empirical frequencies of the observed counts."""
        res = self.res
        endog = res.model.endog
        pr = res.predict(which='prob')
        # NOTE(review): the pmf is evaluated at alpha=0.5 and the DGP
        # inflation probability rather than the estimated parameters --
        # confirm this is intended.
        pr2 = sm.distributions.zinegbin.pmf(np.arange(pr.shape[1])[:,None],
            res.predict(), 0.5, 2, self.prob_infl).T
        assert_allclose(pr, pr2, rtol=0.1, atol=0.1)
        prm = pr.mean(0)
        pr2m = pr2.mean(0)
        # empirical frequency of each observed count
        freq = np.bincount(endog.astype(int)) / len(endog)
        assert_allclose(((pr2m - prm)**2).mean(), 0, rtol=1e-10, atol=5e-4)
        assert_allclose(((prm - freq)**2).mean(), 0, rtol=1e-10, atol=1e-4)
def test_predict_generic_zi(self):
# These tests don't use numbers from other packages.
# Tests are on closeness of estimated to true/DGP values
# and theoretical relationship between quantities
res = self.res
endog = self.endog
exog = self.res.model.exog
prob_infl = self.prob_infl
nobs = len(endog)
freq = np.bincount(endog.astype(int)) / len(endog)
probs = res.predict(which='prob')
probsm = probs.mean(0)
assert_allclose(freq, probsm, atol=0.02)
probs_unique = res.predict(exog=[[1, 0], [1, 1]],
exog_infl=np.asarray([[1], [1]]),
which='prob')
# no default for exog_infl yet
#probs_unique = res.predict(exog=[[1, 0], [1, 1]], which='prob')
probs_unique2 = probs[[1, nobs-1]]
assert_allclose(probs_unique, probs_unique2, atol=1e-10)
probs0_unique = res.predict(exog=[[1, 0], [1, 1]],
exog_infl=np.asarray([[1], [1]]),
which='prob-zero')
assert_allclose(probs0_unique, probs_unique2[:, 0], rtol=1e-10)
probs_main_unique = res.predict(exog=[[1, 0], | |
"""Contains DeepSpeech2 model."""
import logging
import os
import shutil
import time
from datetime import datetime
from distutils.dir_util import mkpath
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
from visualdl import LogWriter
from utils.error_rate import char_errors, word_errors
from ctc_decoders.swig_wrapper import Scorer
from ctc_decoders.swig_wrapper import ctc_beam_search_decoder_batch
from ctc_decoders.swig_wrapper import ctc_greedy_decoder
from model_utils.network import deep_speech_v2_network
logging.basicConfig(
format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')
class DeepSpeech2Model(object):
"""DeepSpeech2Model class.
:param vocab_size: Decoding vocabulary size.
:type vocab_size: int
:param num_conv_layers: Number of stacking convolution layers.
:type num_conv_layers: int
:param num_rnn_layers: Number of stacking RNN layers.
:type num_rnn_layers: int
:param rnn_layer_size: RNN layer size (number of RNN cells).
:type rnn_layer_size: int
:param use_gru: Use gru if set True. Use simple rnn if set False.
:type use_gru: bool
:param share_rnn_weights: Whether to share input-hidden weights between
forward and backward directional RNNs.Notice that
for GRU, weight sharing is not supported.
:type share_rnn_weights: bool
:param place: Program running place.
:type place: CPUPlace or CUDAPlace
:param init_from_pretrained_model: Pretrained model path. If None, will train
from scratch.
:type init_from_pretrained_model: string|None
:param output_model_dir: Output model directory. If None, output to current directory.
:type output_model_dir: string|None
"""
    def __init__(self,
                 vocab_size,
                 num_conv_layers,
                 num_rnn_layers,
                 rnn_layer_size,
                 use_gru=False,
                 share_rnn_weights=True,
                 place=fluid.CPUPlace(),
                 init_from_pretrained_model=None,
                 output_model_dir=None,
                 is_infer=False,
                 error_rate_type='cer',
                 vocab_list=None):
        """Store configuration; see the class docstring for most parameters.

        :param is_infer: Build the inference program immediately if True.
        :type is_infer: bool
        :param error_rate_type: 'cer' (character) or 'wer' (word); selects the
                                error counter used by test().
        :type error_rate_type: str
        :param vocab_list: Decoding vocabulary used by greedy decoding.
        :type vocab_list: list|None
        """
        self._vocab_size = vocab_size
        self._num_conv_layers = num_conv_layers
        self._num_rnn_layers = num_rnn_layers
        self._rnn_layer_size = rnn_layer_size
        self._use_gru = use_gru
        self._share_rnn_weights = share_rnn_weights
        self._place = place
        self._init_from_pretrained_model = init_from_pretrained_model
        self._output_model_dir = output_model_dir
        self._ext_scorer = None
        self.logger = logging.getLogger("")
        self.logger.setLevel(level=logging.INFO)
        # VisualDL writer for training/test curves
        self.writer = LogWriter(logdir='log')
        self.error_rate_type = error_rate_type
        self.vocab_list = vocab_list
        # inference-related state; populated by init_infer_program()
        self.infer_program = None
        self.infer_feeder = None
        self.infer_log_probs = None
        self.infer_exe = None
        if is_infer:
            self.init_infer_program()
    def create_network(self, is_infer=False):
        """Create data layers and model network.

        :param is_infer: Whether to build the inference network (no text
                         labels, plain DataFeeder) instead of the training
                         network (DataLoader plus CTC loss).
        :type is_infer: bool
        :return reader: Reader for input (DataLoader for training,
                        DataFeeder for inference).
        :rtype reader: DataLoader|DataFeeder
        :return log_probs: An output unnormalized log probability layer.
        :rtype log_probs: Variable
        :return loss: A ctc loss layer (None-label variant when is_infer).
        :rtype loss: Variable
        """
        if not is_infer:
            # training inputs: audio features, token ids, lengths, conv masks
            input_fields = {
                'names': ['audio_data', 'text_data', 'seq_len_data', 'masks'],
                'shapes': [[None, 161, None], [None, 1], [None, 1], [None, 32, 81, None]],
                'dtypes': ['float32', 'int32', 'int64', 'float32'],
                'lod_levels': [0, 1, 0, 0]
            }
            inputs = [
                fluid.data(name=input_fields['names'][i],
                           shape=input_fields['shapes'][i],
                           dtype=input_fields['dtypes'][i],
                           lod_level=input_fields['lod_levels'][i])
                for i in range(len(input_fields['names']))
            ]
            reader = fluid.io.DataLoader.from_generator(feed_list=inputs,
                                                        capacity=64,
                                                        iterable=False,
                                                        use_double_buffer=True)
            (audio_data, text_data, seq_len_data, masks) = inputs
        else:
            # inference inputs: no transcripts are available
            audio_data = fluid.data(name='audio_data',
                                    shape=[None, 161, None],
                                    dtype='float32',
                                    lod_level=0)
            seq_len_data = fluid.data(name='seq_len_data',
                                      shape=[None, 1],
                                      dtype='int64',
                                      lod_level=0)
            masks = fluid.data(name='masks',
                               shape=[None, 32, 81, None],
                               dtype='float32',
                               lod_level=0)
            text_data = None
            reader = fluid.DataFeeder([audio_data, seq_len_data, masks], self._place)

        log_probs, loss = deep_speech_v2_network(audio_data=audio_data,
                                                 text_data=text_data,
                                                 seq_len_data=seq_len_data,
                                                 masks=masks,
                                                 dict_size=self._vocab_size,
                                                 num_conv_layers=self._num_conv_layers,
                                                 num_rnn_layers=self._num_rnn_layers,
                                                 rnn_size=self._rnn_layer_size,
                                                 use_gru=self._use_gru,
                                                 share_rnn_weights=self._share_rnn_weights)
        return reader, log_probs, loss
    def init_from_pretrained_model(self, exe, program):
        '''Init params from pretrained model and return the epoch to resume at.

        :param exe: Executor used to load the parameters.
        :param program: Program whose parameters are initialized.
        :return: epoch to resume from; directories named ``..._epoch_<n>``
                 resume at ``n + 1``, anything else at 1.
        :rtype: int
        '''
        assert isinstance(self._init_from_pretrained_model, str)
        if not os.path.exists(self._init_from_pretrained_model):
            print(self._init_from_pretrained_model)
            # NOTE(review): raising Warning aborts (it is an Exception
            # subclass) but an IOError/OSError would be the conventional
            # choice here.
            raise Warning("The pretrained params do not exist.")
        fluid.io.load_params(executor=exe,
                             dirname=self._init_from_pretrained_model,
                             main_program=program,
                             filename="params.pdparams")
        print("finish initing model from pretrained params from %s" % self._init_from_pretrained_model)

        # parse a trailing "..._epoch_<n>" from the checkpoint directory name
        # so training can continue the epoch count
        pre_epoch = 0
        dir_name = self._init_from_pretrained_model.split('_')
        if len(dir_name) >= 2 and dir_name[-2].endswith('epoch') and dir_name[-1].isdigit():
            pre_epoch = int(dir_name[-1])

        return pre_epoch + 1
def save_param(self, exe, program, dirname):
'''Save model params to dirname'''
assert isinstance(self._output_model_dir, str)
param_dir = os.path.join(self._output_model_dir)
if not os.path.exists(param_dir):
os.mkdir(param_dir)
fluid.io.save_params(executor=exe,
dirname=os.path.join(param_dir, dirname),
main_program=program,
filename="params.pdparams")
print("save parameters at %s" % (os.path.join(param_dir, dirname)))
return True
    def test(self, test_reader):
        '''Evaluate the model on a test set.

        :param test_reader: Reader yielding test data batches.
        :type test_reader: Reader
        :return: Aggregate CER (character) or WER (word) error rate,
                 depending on ``self.error_rate_type``.
        :rtype: float
        '''
        errors_sum, len_refs = 0.0, 0
        # choose character- or word-level edit-distance counting
        errors_func = char_errors if self.error_rate_type == 'cer' else word_errors
        # (re)build the inference program
        self.init_infer_program()
        for infer_data in test_reader():
            # run inference to obtain per-frame probabilities
            probs_split = self.infer_batch_probs(infer_data=infer_data)
            # greedy (best-path) decoding
            result_transcripts = self.decode_batch_greedy(probs_split=probs_split,
                                                          vocab_list=self.vocab_list)
            target_transcripts = infer_data[1]
            # accumulate errors and reference lengths over the batch
            for target, result in zip(target_transcripts, result_transcripts):
                errors, len_ref = errors_func(target, result)
                errors_sum += errors
                len_refs += len_ref
        # NOTE(review): raises ZeroDivisionError if the reader yields no data
        return errors_sum / len_refs
    def train(self,
              train_batch_reader,
              dev_batch_reader,
              learning_rate,
              gradient_clipping,
              num_epoch,
              batch_size,
              num_samples,
              save_epoch=100,
              num_iterations_print=100,
              test_off=False):
        """Train the model.

        :param train_batch_reader: Train data reader.
        :type train_batch_reader: callable
        :param dev_batch_reader: Validation data reader.
        :type dev_batch_reader: callable
        :param learning_rate: Learning rate for ADAM optimizer.
        :type learning_rate: float
        :param gradient_clipping: Gradient clipping threshold.
        :type gradient_clipping: float
        :param num_epoch: Number of training epochs.
        :type num_epoch: int
        :param batch_size: Number of batch size.
        :type batch_size: int
        :param num_samples: The num of train samples.
        :type num_samples: int
        :param save_epoch: Save a checkpoint every ``save_epoch`` epochs.
        :type save_epoch: int
        :param num_iterations_print: Number of training iterations between
                                     loss fetch/print/log steps.
        :type num_iterations_print: int
        :param test_off: Turn off testing on the dev set after each epoch.
        :type test_off: bool
        """
        # prepare model output directory
        if not os.path.exists(self._output_model_dir):
            mkpath(self._output_model_dir)

        # device count drives the learning-rate decay step computation
        if isinstance(self._place, fluid.CUDAPlace):
            dev_count = fluid.core.get_cuda_device_count()
        else:
            dev_count = int(os.environ.get('CPU_NUM', 1))

        # prepare the network
        train_program = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_reader, _, ctc_loss = self.create_network()
                # prepare optimizer: Adam with exponential LR decay,
                # L2 regularization and global-norm gradient clipping
                optimizer = fluid.optimizer.AdamOptimizer(
                    learning_rate=fluid.layers.exponential_decay(
                        learning_rate=learning_rate,
                        decay_steps=num_samples / batch_size / dev_count,
                        decay_rate=0.83,
                        staircase=True),
                    regularization=fluid.regularizer.L2Decay(0.0001),
                    grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=gradient_clipping))
                optimizer.minimize(loss=ctc_loss)

        exe = fluid.Executor(self._place)
        exe.run(startup_prog)

        # init from some pretrain models, to better solve the current task
        pre_epoch = 0
        if self._init_from_pretrained_model:
            pre_epoch = self.init_from_pretrained_model(exe, train_program)

        build_strategy = compiler.BuildStrategy()
        exec_strategy = fluid.ExecutionStrategy()

        # pass the build_strategy to with_data_parallel API
        train_compiled_prog = compiler.CompiledProgram(train_program).with_data_parallel(loss_name=ctc_loss.name,
                                                                                         build_strategy=build_strategy,
                                                                                         exec_strategy=exec_strategy)

        train_reader.set_batch_generator(train_batch_reader)

        train_step = 0
        test_step = 0
        num_batch = -1
        # run train
        for epoch_id in range(num_epoch):
            train_reader.start()
            epoch_loss = []
            time_begin = time.time()
            batch_id = 0
            while True:
                try:
                    fetch_list = [ctc_loss.name]
                    # fetch (and log) the loss only every num_iterations_print
                    # batches; other iterations run without fetching for speed
                    if batch_id % num_iterations_print == 0:
                        fetch = exe.run(program=train_compiled_prog,
                                        fetch_list=fetch_list,
                                        return_numpy=False)
                        each_loss = fetch[0]
                        epoch_loss.extend(np.array(each_loss[0]) / batch_size)

                        print("Train [%s] epoch: [%d/%d], batch: [%d/%d], train loss: %f\n" %
                              (datetime.now(), epoch_id, num_epoch, batch_id, num_batch,
                               np.mean(each_loss[0]) / batch_size))
                        # log the training loss to VisualDL
                        self.writer.add_scalar('Train loss', np.mean(each_loss[0]) / batch_size, train_step)
                        train_step += 1
                    else:
                        _ = exe.run(program=train_compiled_prog,
                                    fetch_list=[],
                                    return_numpy=False)
                    batch_id = batch_id + 1
                except fluid.core.EOFException:
                    # reader exhausted: end of this epoch
                    train_reader.reset()
                    break
            num_batch = batch_id
            used_time = time.time() - time_begin
            if test_off:
                print('======================last Train=====================')
                print("Train time: %f sec, epoch: %d, train loss: %f\n" %
                      (used_time, epoch_id, np.mean(np.array(epoch_loss))))
                print('======================last Train=====================')
            else:
                print('\n======================Begin test=====================')
                # save a temporary model used for evaluation
                self.save_param(exe, train_program, "temp")
                # point the pretrained-model path at the temporary model so
                # the freshly built inference program loads it
                self._init_from_pretrained_model = os.path.join(self._output_model_dir, 'temp')
                # evaluate on the dev set
                test_result = self.test(test_reader=dev_batch_reader)
                # remove the temporary model
                shutil.rmtree(os.path.join(self._output_model_dir, 'temp'))
                print("Train time: %f sec, epoch: %d, train loss: %f, test %s: %f"
                      % (used_time, epoch_id + pre_epoch, np.mean(np.array(epoch_loss)), self.error_rate_type, test_result))
                print('======================Stop Train=====================\n')
                # log the test metric to VisualDL
                self.writer.add_scalar('Test %s' % self.error_rate_type, test_result, test_step)
                test_step += 1
            # periodic checkpoint
            if (epoch_id + 1) % save_epoch == 0:
                self.save_param(exe, train_program, "epoch_" + str(epoch_id + pre_epoch))

        self.save_param(exe, train_program, "step_final")

        print("\n------------Training finished!!!-------------")
# Predict probabilities for one batch of audio.
    def infer_batch_probs(self, infer_data):
        """Infer the prob matrices for a batch of speech utterances.

        :param infer_data: Batch as produced by the reader: index 0 holds the
                           padded audio features, index 2 the sequence
                           lengths, index 3 the conv masks (index 1 holds
                           transcripts and is not used here).
        :type infer_data: list
        :return: List of 2-D probability matrix, and each consists of prob
                 vectors for one speech utterance.
        :rtype: List of matrix
        """
        # define inferer
        infer_results = []

        # run inference one utterance at a time
        for i in range(infer_data[0].shape[0]):
            each_log_probs = self.infer_exe.run(program=self.infer_program,
                                                feed=self.infer_feeder.feed(
                                                    [[infer_data[0][i], infer_data[2][i], infer_data[3][i]]]),
                                                fetch_list=[self.infer_log_probs],
                                                return_numpy=False)
            infer_results.extend(np.array(each_log_probs[0]))

        # slice result: split the concatenated frames back per utterance
        infer_results = np.array(infer_results)
        # output length per utterance; presumably the network downsamples
        # time by a factor of 3 -- TODO confirm against the conv stack
        seq_len = (infer_data[2] - 1) // 3 + 1
        start_pos = [0] * (infer_data[0].shape[0] + 1)
        for i in range(infer_data[0].shape[0]):
            start_pos[i + 1] = start_pos[i] + seq_len[i][0]
        probs_split = [
            infer_results[start_pos[i]:start_pos[i + 1]]
            for i in range(0, infer_data[0].shape[0])
        ]
        return probs_split
# Initialize the inference program and load the pretrained model.
    def init_infer_program(self):
        """Build the inference program, executor and feeder, then load the
        pretrained parameters from ``self._init_from_pretrained_model``."""
        # define inferer
        self.infer_program = fluid.Program()
        startup_prog = fluid.Program()

        # prepare the network
        with fluid.program_guard(self.infer_program, startup_prog):
            with fluid.unique_name.guard():
                self.infer_feeder, self.infer_log_probs, _ = self.create_network(is_infer=True)

        # freeze the program for test-time execution
        self.infer_program = self.infer_program.clone(for_test=True)
        self.infer_exe = fluid.Executor(self._place)
        self.infer_exe.run(startup_prog)

        # init param from pretrained_model
        if not self._init_from_pretrained_model:
            # NOTE(review): exit() in library code kills the whole process;
            # raising an exception would be friendlier to callers.
            exit("No pretrain model file path!")
        self.init_from_pretrained_model(self.infer_exe, self.infer_program)
# Inference for a single audio utterance.
def infer(self, feature):
"""Infer the prob matrices for a batch of speech utterances.
:param infer_data: List of utterances to infer, with each utterance
consisting of a tuple of audio features and
transcription text (empty string).
:type infer_data: list
:param feeding_dict: Feeding is a map of field name and tuple index
of the data that reader returns.
:type feeding_dict: dict|list
| |
catalog specification file for uvotgraspcorr.
- **get_curve** : bool or path
True: activate option to supply the curvature coefficients of all
orders by hand.
path: filename with coefficients of curvature
- **uvotgraspcorr_on** : bool
enable/disable rerun of uvotgraspcorr to update the WCS keywords
- **update_pnt** : bool
enable/disable update of the WCS keywords from the attitude file
(this is done prior to running uvotgraspcorr, if that is enabled)
- **fit_sigmas** : bool
fit the sigma of trackwidths if True (not implemented, always on)
- **get_sigma_poly** : bool
option to supply the polynomial for the sigma (not implemented)
- **lfilt1**, **lfilt2** : str
name of the lenticular filter before and after the grism exposure
(now supplied by fileinfo())
- **lfilt1_ext**, **lfilt2_ext** : int
extension of the lenticular filter (now supplied by fileinfo())
- **plot_img** : bool
plot the first figure with the det image
- **plot_raw** : bool
plot the raw spectrum data
- **plot_spec** : bool
plot the flux spectrum
- **highlight** : bool
add contours to the plots to highlight contrasts
- **chatter** : int
verbosity of program
- **set_maglimit** : int
specify a magnitude limit to search for background sources in the USNO-B1 catalog
- **background_template** : numpy 2D array
User provides a background template that will be used instead
determining background. Must be in counts. Size and alignment
must exactly match detector image.
Returns
-------
None, (give_result=True) compounded data (Y0, Y1, Y2, Y3, Y4) which
are explained in the code, or (give_new_result=True) a data dictionary.
Notes
-----
**Quick Start**
`getSpec(ra,dec,obsid, ext,)`
should produce plots and output files
**Which directory?**
The program needs to be started from the CORRECT data directory.
The attitude file [e.g., "sw<OBSID>pat.fits" ]is needed!
A link or copy of the attitude file needs to be present in the directory
or "../../auxil/" directory as well.
**Global parameters**
These parameters can be reset, e.g., during a (i)python session, before calling getSpec.
- **trackwidth** : float
width spectral extraction in units of sigma. The default is trackwidth = 2.5
The alternative default is trackwidth = 1.0 which gives better results for
weak sources, or spectra with nearby contamination. However, the flux
calibration and coincidence-loss correction give currently inconsistent
results. When using trackwidth=1.0, rescale the flux to match trackwidth=2.5
which value was used for flux calibration and coincidence-loss correction.
- **give_result** : bool
set to False since a call to getSpec with this set will return all the
intermediate results. See returns
When the extraction slit is set to be straight ``curved="straight"`` it cuts off the UV part of the
spectrum for spectra located in the top left and bottom right of the image.
History
-------
Version 2011-09-22 NPMK(MSSL) : handle case with no lenticular filter observation
Version 2012-01-15 NPMK(MSSL) : optimal extraction is no longer actively supported until further notice
Version 2013-10-23 NPMK(MSSL) : fixed bug so uvotgraspcorr gives same accuracy as lenticular filter
Version 2014-01-01 NPMK(MSSL) : aperture correction for background added; output dictionary
Version 2014-07-23 NPMK(MSSL) : coi-correction using new calibrated coi-box and factor
Version 2014-08-04 NPMK(MSSL/UCL): expanded offsetlimit parameter with list option to specify y-range.
Version 2015-12-03 NPMK(MSSL/UCL): change input parameter 'get_curve' to accept a file name with coefficients
Version 2016-01-16 NPMK(MSSL/UCL): added options for background; disable automated centroiding of spectrum
Example
-------
from uvotpy.uvotgetspec import getSpec
from uvotpy import uvotgetspec
import os, shutil
indir1 = os.getenv('UVOTPY') +'/test'
indir2 = os.getcwd()+'/test/UVGRISM/00055900056/uvot/image'
shutil.copytree(indir1, os.getcwd()+'/test' )
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=1,indir=indir2, clobber=True )
'''
# (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
# (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
#
#( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
# (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
#
#fit,(coef0,coef1,coef2,coef3),(bg_zeroth,bg_first,bg_second,bg_third),(borderup,borderdown),apercorr,expospec=Y2
#
#counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
#
#wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
#
# where,
#
#(present0,present1,present2,present3),(q0,q1,q2,q3), \
# (y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
# (y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
# (x,xstart,xend,sp_all,quality,co_back) = fit
#
# dis = dispersion with zero at ~260nm[UV]/420nm[V] ; spnet = background-substracted spectrum from 'spnetimg'
# angle = rotation-angle used to extract 'extimg' ; anker = first order anchor position in DET coordinates
# anker2 = second order anker X,Y position ; anker_field = Xphi,Yphy input angles with respect to reference
# ank_c = X,Y position of axis of rotation (anker) in 'extimg'
# bg = mean background, smoothed, with sources removed
# bg1 = one-sided background, sources removed, smoothed ; bg2 = same for background opposite side
# extimg = image extracted of source and background, 201 pixels wide, all orders.
# spimg = image centered on first order position ; spnetimg = background-subtracted 'spimg'
# offset = offset of spectrum from expected position based on 'anchor' at 260nm[UVG]/420nm[VG], first order
# C_1 = dispersion coefficients [python] first order; C_2 = same for second order
# img = original image ;
# WC_lines positions for selected WC star lines ; hdr = header for image
# m1,m2 = index limits spectrum ; aa = indices spectrum (e.g., dis[aa])
# wav1 = wavelengths for dis[aa] first order (combine with spnet[aa])
#
# when wr_outfile=True the program produces a flux calibrated output file by calling uvotio.
# [fails if output file is already present and clobber=False]
#
# The background must be consistent with the width of the spectrum summed.
from uvotio import fileinfo, rate2flux, readFluxCalFile
from uvotplot import plot_ellipsoid_regions
if (type(RA) == np.ndarray) | (type(DEC) == np.array):
raise IOError("RA, and DEC arguments must be of float type ")
if type(offsetlimit) == list:
if len(offsetlimit) != 2:
raise IOError("offsetlimit list must be [center, distance from center] in pixels")
get_curve_filename = None
a_str_type = type(curved)
if chatter > 4 :
print ("\n*****\na_str_type = ",a_str_type)
print ("value of get_curve = ",get_curve)
print ("type of parameter get_curve is %s\n"%(type(get_curve)) )
print ("type curved = ",type(curved))
if type(get_curve) == a_str_type:
# file name: check this file is present
if os.access(get_curve,os.F_OK):
get_curve_filename = get_curve
get_curve = True
else:
raise IOError(
"ERROR: get_curve *%s* is not a boolean value nor the name of a file that is on the disk."
%(get_curve) )
elif type(get_curve) == bool:
if get_curve:
get_curve_filename = None
print("requires input of curvature coefficients")
elif type(get_curve) == type(None):
get_curve = False
else:
raise IOError("parameter get_curve should by type str or bool, but is %s"%(type(get_curve)))
# check environment
CALDB = os.getenv('CALDB')
if CALDB == '':
print('WARNING: The CALDB environment variable has not been set')
HEADAS = os.getenv('HEADAS')
if HEADAS == '':
print('WARNING: The HEADAS environment variable has not been set')
print('That is needed for the calls to uvot Ftools ')
#SCAT_PRESENT = os.system('which scat > /dev/null')
#if SCAT_PRESENT != 0:
# print('WARNING: cannot locate the scat program \nDid you install WCSTOOLS ?\n')
SESAME_PRESENT = os.system('which sesame > /dev/null')
#if SESAME_PRESENT != 0:
# print 'WARNING: cannot locate the sesame program \nDid you install the cdsclient tools?\n'
# fix some parameters
framtime = 0.0110329 # all grism images are taken in unbinned mode
splineorder=3
getzmxmode='spline'
smooth=50
testparam=None
msg = "" ; msg2 = "" ; msg4 = ""
attime = datetime.datetime.now()
logfile = 'uvotgrism_'+obsid+'_'+str(ext)+'_'+'_'+attime.isoformat()[0:19]+'.log'
if type(fluxcalfile) == bool: fluxcalfile = None
tempnames.append(logfile)
tempntags.append('logfile')
tempnames.append('rectext_spectrum.img')
tempntags.append('rectext')
lfiltnames=np.array(['uvw2','uvm2','uvw1','u','b','v','wh'])
ext_names =np.array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
filestub = 'sw'+obsid
histry = ""
for x in sys.argv: histry += x | |
# Source repository: cancerregulome/inspectra
#!/usr/bin/env python
# Copyright (c) 2012, the GraphSpectrometer Contributors listed at
# http://github.com/ryanbressler/GraphSpectrometer/graphs/contributors
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Institute for Systems Biology, GraphSpectrometer nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
python script/module that uses pyamg to calculat and plot fiedler vectors of a graph
using pyamg,numpy and scipy.
Input:
A sif file or any three column white space deliminated file with the first and
third column repesenting node names and each row repesenting an edge.
Comand line Usage:
python fiedler.py my.sif
Can also be used on rf-ace output files provided the file has a ".out" exstension
or pairwise files with a ".pwpv" extension.
Or with x args as a thread pool to plot many sif files:
ls *.sif | xargs --max-procs=8 -I FILE python fiedler.py FILE
A minimum edge cutoff can also be specified:
fiedler.py FILE .5
By default generates a number of pngs of diffrent sorts of plots and a .json file containing:
{"f1": the first fiedler vector,
"f2": (if caclulated) the second fideler vector
"d": the node degrees,
"r1": the rank of each node in the first fiedler vector
"r2": the rank of each node in the second fiedler vector
"iByn": the index of the nodes by the string used to represent them in the input file
"nByi": the string used to represent nodes in the input file by their index in the graph
"adj": the adjascancy list}
Author/Contact:<NAME>, <EMAIL>/gmail.com
"""
import sys
import json
import math
import random
import itertools
import os
try:
import hypergeom
except ImportError:
print "hypergeom not found"
import numpy
import scipy
from scipy.sparse.linalg import lobpcg
from scipy import linalg
from scipy.sparse import coo_matrix
from pyamg import smoothed_aggregation_solver
import matplotlib as mpl
import pylab as pl
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from sklearn import mixture
from sklearn.cluster import DBSCAN
def file_parse(fo, node1=0, node2=1, filter_col=-1, filter_min=.5, val_col=-1, blacklist=None):
    """parse a sif like file into an adjacency list (by matrix index) and node name look up tables.
    Takes:
    fo: A file like object containing a sif or similar white space delimited file with at least 2
    columns of node names that are legal python dictionary keys, delimited by tabs or spaces.
    node1=0 : the index of the column containing the first node
    node2=1 : the index of the column containing the second node
    filter_col=-1 : optional column index; rows with abs(value) < filter_min are dropped
    filter_min=.5 : threshold used together with filter_col
    val_col=-1 : optional column index whose abs(value) is appended to each edge
    blacklist=None : substrings; edges touching a node whose name contains one are dropped
    Returns a tuple containing:
    An Nx2 (or Nx3 when val_col is given) nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjacency list.
    A dictionary containing int ids in the above by the string name in the input file.
    An array of strings containing the name in the input by the int id.
    """
    # avoid a shared mutable default argument
    if blacklist is None:
        blacklist = []
    out = []
    intidsbyname = {}
    namesbyintid = []
    incintid = 0
    for line in fo:
        # BUG FIX: the original tested line[1], which missed comments starting
        # at column 0 and raised IndexError on single-character lines.
        if line.startswith("#"):
            continue
        vs = line.rstrip().split()
        if len(vs) <= node2:
            continue
        # optional edge-weight filter
        if filter_col != -1 and math.fabs(float(vs[filter_col])) < filter_min:
            continue
        # BUG FIX: the original blacklist logic appears to read a 'skip' flag
        # that is undefined when the blacklist is empty, and used 'continue'
        # where an early break-out was intended; rewritten as a single test.
        if any(black_sheep in strid
               for black_sheep in blacklist
               for strid in (vs[node1], vs[node2])):
            continue
        # assign a fresh integer id to any node we have not seen before
        for strid in (vs[node1], vs[node2]):
            if strid not in intidsbyname:
                intidsbyname[strid] = incintid
                namesbyintid.append(strid)
                incintid += 1
        row = [intidsbyname[vs[node1]], intidsbyname[vs[node2]]]
        if val_col != -1:
            row.append(math.fabs(float(vs[val_col])))
        out.append(row)
    fo.close()
    return (out, intidsbyname, namesbyintid)
def adj_mat(adj_list):
"""get the graph laplacian (in coo_matrix sparse matrix form) of an
adjancy list.0
Takes:
An Nx2 nested list of ints of the form:
[[node1,node2],
...]
or an Nx3 list in the form:
[[node1,node2,value],
...]
Representing the adjascancy list.
Returns
The adjasancy matrix in coo_matrix format.
"""
adj=numpy.array(adj_list)
Npts = numpy.max(adj[:,:2])+1
data = numpy.ones(adj.shape[0],dtype=float)
if adj.shape[1]>2:
data=adj[:,2]
A = coo_matrix((data,(adj[:,0],adj[:,1])), shape=(Npts,Npts))
return (A,adj,Npts)
def adj_list(adj_mat, includeValue=True):
    """Convert a sparse matrix back into an [row, col, value] edge list.

    NOTE: includeValue is currently ignored; the value column is always
    emitted.
    """
    coo = adj_mat.tocoo()
    edges = numpy.column_stack((coo.row, coo.col, coo.data)).tolist()
    # column_stack promotes everything to float; restore int node ids
    for edge in edges:
        edge[0] = int(edge[0])
        edge[1] = int(edge[1])
    return edges
def graph_laplacian(adj_list):
    """get the graph Laplacian (as a csr_matrix) of an adjacency list.
    Takes:
    An Nx2 nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjacency list.
    Returns
    The graph Laplacian in csr_matrix format (off-diagonal entries are
    negative edge weights, diagonal entries are the node degrees).
    """
    (A,adj,Npts) = adj_mat(adj_list)
    # symmetrize and negate: Laplacian off-diagonals are -w_ij
    A = -1*(A.T + A)/2
    A=A.tocsr()
    if len(adj_list[0])==2:
        # unweighted input: force every stored off-diagonal entry to -1
        A.data = -1*numpy.ones((A.nnz,),dtype=float)
    # zero the diagonal first so self-loops don't pollute the degree sums
    A.setdiag(numpy.zeros((Npts,),dtype=float))
    # diagonal = node degree (negated row sums of the off-diagonal part)
    A.setdiag(-1*numpy.array(A.sum(axis=1)).ravel())
    return A.tocsr()
def fiedler(adj_list,plot=False,fn="FiedlerPlots",n_fied=2):
    """calculate the first Fiedler vector of a graph adjacency list and optionally write associated plots to file.
    Takes:
    adj_list:
    An Nx2 nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjacency list.
    plot=False: make plots or not.
    fn="FiedlerPlots": filename to prepend to the plot png file names
    n_fied=2: the number of fiedler vectors to calculate (values above 2 will not be output)
    Returns a Dictionary of the form:
    {"f1": the first fiedler vector,
    "f2": (if calculated) the second fiedler vector
    "d": the node degrees,
    "r1": the rank of each node in the first fiedler vector
    "r2": the rank of each node in the second fiedler vector}
    """
    A = graph_laplacian(adj_list)
    # construct preconditioner
    ml = smoothed_aggregation_solver(A, coarse_solver='pinv2',max_coarse=10)
    M = ml.aspreconditioner()
    # solve for lowest two modes: constant vector and Fiedler vector
    X = scipy.rand(A.shape[0], n_fied+1)
    # NOTE(review): the AMG preconditioner M built above is not passed to
    # lobpcg (M=None), so it is unused — confirm whether this is intentional.
    (eval,evec,res) = lobpcg(A, X, M=None, tol=1e-12, largest=False, \
            verbosityLevel=0, retResidualNormsHistory=True)
    if plot:
        doPlots(evec[:,1],evec[:,2],A.diagonal(),adj_list,fn)
    # argsort of argsort yields each node's rank within the vector
    out = {"f1":list(evec[:,1]),"d":list(A.diagonal()),"r1":[int(i) for i in list(numpy.argsort(numpy.argsort(evec[:,1])))]}
    if n_fied > 1:
        out["f2"]=list(evec[:,2])
        out["r2"]=[int(i) for i in list(numpy.argsort(numpy.argsort(evec[:,2])))]
    return out
#Plots are not optimized ...ie they end up sorting the same thing multiple times
def doPlots(f1,f2,degrees,adj_list,fn,widths=[16],heights=False,vsdeg=True,nByi=False,adj_list2=False,directed=False,dbscan_eps=0,dbscan_rank_eps=0,enrichdb="",clust_x=False,clust_y=False,clust_xy=True,dorank=True,doraw=True):
    """Write the standard plot set for a pair of Fiedler vectors:
    f1 vs degree, one f1-vs-f2 scatter per requested figure width, and
    f2 vs degree (with a ".second" filename suffix)."""
    # first Fiedler vector against node degree
    if vsdeg:
        plotFiedvsDeg(f1, degrees, fn)
    # one fied-vs-fied scatter per requested figure width
    for idx, fig_width in enumerate(widths):
        fig_height = heights[idx] if heights != False else fig_width
        plotFiedvsFied(f1, f2, fn, adj_list=adj_list, adj_list2=adj_list2,
                       width=fig_width, height=fig_height, nByi=nByi,
                       directed=directed, dbscan_eps=dbscan_eps,
                       dbscan_rank_eps=dbscan_rank_eps, enrichdb=enrichdb,
                       clust_x=clust_x, clust_y=clust_y, clust_xy=clust_xy,
                       dorank=dorank, doraw=doraw)
    # second Fiedler vector against node degree
    if vsdeg:
        plotFiedvsDeg(f2, degrees, fn + ".second")
def plotEdges(x,y,ax,adj_list,width,height,color="green",directed=False):
    """Draw the graph's edges on matplotlib axis `ax` at node coordinates
    (x[i], y[i]). Directed edges are drawn as arrows, undirected ones as
    line patches. When an edge has a third (weight) column, it is used as
    the alpha of the edge; otherwise alpha defaults to .5.
    """
    #codes=[]
    #points=[]
    emax = x.max()
    for edge in adj_list:
        #points[len(points):]=[(x[edge[0]],y[edge[0]]),(x[edge[1]],y[edge[1]])]
        points=[(x[edge[0]],y[edge[0]]),(x[edge[1]],y[edge[1]])]
        #codes[len(codes):]=[mpath.Path.MOVETO,mpath.Path.LINETO]
        codes=[mpath.Path.MOVETO,mpath.Path.LINETO]
        alpha=.5
        if len(edge)>2:
            # weighted edge: weight drives the transparency
            alpha=0
            if float(edge[2])>0:
                #alpha=math.sqrt(float(edge[2]))
                alpha=float(edge[2])
        if directed:
            dx=points[1][0]-points[0][0]
            dy=points[1][1]-points[0][1]
            length = math.sqrt(dx*dx+dy*dy)
            # heuristic: scale arrow-head size with edge length and the
            # figure aspect so heads stay visible at any width/height
            head_width=emax*.3*length/(width*math.fabs(dy)+height*math.fabs(dx))
            head_length=emax*.4*length/(height*math.fabs(dy)+width*math.fabs(dx))
            ax.arrow(points[0][0],points[0][1],dx,dy,width=.2*head_width,head_width=head_width,head_length=head_length,color=color,alpha=alpha,length_includes_head=True)
        else:
            patch = mpatches.PathPatch(mpath.Path(points,codes), edgecolor=color, lw=.2,alpha=alpha)
            ax.add_patch(patch)
def PlotEdgeVvsEdgeV(adj1,adj2,nByi1,nByi2,fn,width=16):
    """Scatter-plot edge values of two graphs against each other.

    Edges are matched by their (node name, node name) pair using the two
    name tables; an edge present in only one graph plots at 0 on the other
    axis. Writes <fn>.EdgeVvsEdgeV.width<width>.pdf.
    """
    # edgevs[name0][name1] = {graph index (0 or 1): edge value}
    edgevs = {}
    nedges = 0
    nByis=[nByi1,nByi2]
    for i,adj in enumerate([adj1,adj2]):
        for edge in adj:
            [e0,e1,v]=edge
            # translate int ids back to node names so edges can be matched
            e0=nByis[i][e0]
            e1=nByis[i][e1]
            if not e0 in edgevs:
                edgevs[e0]={}
            if not e1 in edgevs[e0]:
                edgevs[e0][e1]={}
                nedges+=1
            edgevs[e0][e1][i]=float(v)
    x = numpy.zeros((nedges,),dtype=float)
    y = numpy.zeros((nedges,),dtype=float)
    i = 0
    for n0 in edgevs:
        for n1 in edgevs[n0]:
            e = edgevs[n0][n1]
            if 0 in e:
                x[i]=e[0]
            if 1 in e:
                y[i]=e[1]
            i=i+1
    F = plt.figure()
    ax = F.add_subplot(111)
    ax.scatter(x, y,zorder=2)
    # NOTE(review): this second pass relies on the dict iterating in the same
    # order as the fill loop above (true for an unmodified dict) so that
    # annotation i lines up with point (x[i], y[i]).
    i = 0
    for n0 in edgevs:
        for n1 in edgevs[n0]:
            plt.annotate(
                "->".join([":".join(n.split(":")[1:3]) for n in [n0,n1]]),
                xy = (x[i], y[i]), xytext = (-0, 0),
                textcoords = 'offset points', ha = 'right', va = 'bottom',size=8,alpha=.4)
            i+=1
    ax.grid(True)
    F.set_size_inches( (width,width) )
    F.savefig(fn+".EdgeVvsEdgeV.width%s.pdf"%(width),bbox_inches='tight')
    F.clear()
def doDbScan(plt,ax,fied1,fied2,fn,adj_list,adj_list2,width,height,nByi,directed,gmmcomponents,dbscan_eps,enrichdb,axis="xy"):
"""
add enriched dbscan information to a plot
"""
X=0
minormin = 0
if axis == "x":
print "dbscaning x at %s"%(dbscan_eps)
X=numpy.transpose(numpy.column_stack((fied1)))
minormin = fied2.min()
elif axis == "y":
print "dbscaning y at %s"%(dbscan_eps)
X=numpy.transpose(numpy.column_stack((fied2)))
minormin = fied1.min()
else:
print "dbscaning xy at %s"%(dbscan_eps)
X=numpy.column_stack((fied1,fied2))
db = DBSCAN(eps=dbscan_eps, min_samples=10).fit(X)
core_samples = db.core_sample_indices_
labels = db.labels_
print "Found %s core samples and %s labels"%(len(core_samples),len(labels))
colors=[(random.random(),random.random(),random.random()) for el in labels]
backgroundgenes =[]
enrich=False
enriched = []
if nByi!=False and enrichdb!="":
enrich=True
backgroundgenes = [gene for gene in [nodelabel.split(":")[2] for nodelabel in nByi] if gene!=""]
for k, col in zip(set(labels), | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import os
import json
import shutil
import tempfile
import subprocess
import itertools
import operator
import click
# Public API of this module.
__all__ = ['check', 'parse_dep']
# Keys license-checker can emit per package; used below to build the
# --customPath format file.
# https://github.com/davglass/license-checker#custom-format
LICENSE_CHECKER_FORMAT_KEYS = [
    'name', 'repository', 'dependencyPath', 'url', 'version', 'licenses',
    'licenseText', 'licenseModified', 'licenseFile', 'path',
]
# https://github.com/davglass/license-checker#custom-format
LICENSE_CHECKER_FORMAT = {key: None for key in LICENSE_CHECKER_FORMAT_KEYS}
# Licenses considered eligible for pre-approved distribution.
# Search Oracle wiki for 'Licenses Eligible for Pre-Approval - Distribution'
# List of official license codes: https://spdx.org/licenses/
LICENSES_DISTRIBUTED = [
    'Apache', 'Apache-1.1', 'Apache-2.0', '0BSD', 'BSD', 'BSD-2-Clause',
    'BSD-3-Clause', 'ISC', 'MIT', 'PHP-3.0', 'UPL', 'UPL-1.0', 'ZPL-2.0',
    'Unlicense', 'Python-2.0', 'FreeBSD', 'Apache License, Version 2.0',
]
# Also accept the '*'-suffixed variants (presumably license-checker's marker
# for licenses it guessed rather than read verbatim — TODO confirm).
LICENSES_DISTRIBUTED += [license + '*' for license in LICENSES_DISTRIBUTED]
# Placeholders written into the generated files when data is missing.
MISSING_COPYRIGHT_NOTICE_WARNING = '!!! MISSING COPYRIGHT NOTICE !!!'
MISSING_LICENSE_TEXT_WARNING = '!!! MISSING LICENSE !!!'
def check(dep, list_path, licenses_path, dev=False, debug=False):
    """
    Checks the dependency for licenses and vulnerabilities before you can
    add it to the Third-Party Approval Process:
    #. Installs the dependency in a temporary directory
    #. Checks licenses
    #. Checks for ``PATENTS`` files (TODO)
    #. Provides you with:
    - The list of 4th party deps for the Technology Usage Note field
    - The contents of the Public License field
    - List of packages not eligible for Pre-Approval
    - List of known vulnerabilities (TODO)
    Example::
    bb dep check react@16.2
    Requirements:
    - ``npm install -g license-checker``
    - ``npm install -g licensecheck`` (TODO: ``licensecheck --once --dev --tsv``)
    - ``npm install -g nsp`` (TODO: ``nsp check``)
    - ``npm install -g snyk && snyk auth`` (TODO: ``snyk test``)
    Returns the pre-approval verdict (True when every license is eligible).
    """
    ensure_executables(['npm', 'license-checker'])
    # dep is a (name, version) pair; version None means "local project dir"
    dep_name, dep_version = dep
    click.echo('Analyzing the package `{0}{1}` ...'.format(dep_name, '' if dep_version is None else '@' + dep_version))
    if dep_version is None: # check in the supplied project_dir
        project_dir = dep_name
        package_json = os.path.join(project_dir, 'package.json')
        with open(package_json) as f:
            package_data = json.load(f)
        dep_name = package_data['name'];
        licenses = license_checker(project_dir)
        package_tree = get_package_tree(project_dir)
    else:
        # check the supplied npm dep_name@dep_version module by installing it first
        # NOTE(review): tempfile.TemporaryDirectory is Python-3 only, while a
        # comment elsewhere in this module says Py2 is still supported — confirm.
        with tempfile.TemporaryDirectory() as tmp_dir:
            try:
                install(dep_name, dep_version, tmp_dir, dev=dev)
            except Exception as e:
                if debug:
                    raise
                raise click.BadParameter('The npm package could not be installed')
            licenses = license_checker(tmp_dir)
            package_tree = get_package_tree(tmp_dir)
    pre_approval_verdict = get_pre_approval_verdict(licenses)
    # split out the package under review from its (4th party) dependencies
    details, fourth_party_licenses = separate_top_level_details(licenses, dep_name)
    click.echo('Creating the list of 4th party deps... {}'.format(list_path.name))
    list_path.write(create_deps_list(fourth_party_licenses))
    click.echo('Creating the Public License field contents... {}'.format(licenses_path.name))
    licenses_path.write(create_licenses_list(details, fourth_party_licenses))
    # summary: green when everything is pre-approvable, red otherwise
    color = 'green' if pre_approval_verdict else 'red'
    click.secho('\n{name}@{version}'.format(**details), bold=True, fg=color)
    click.echo((
        'License: {licenses}\n'
        'Copyright Notice: {copyright_notice}\n'
        'Dependencies: {dependencies}\n'
        'Eligible for Pre-Approval: {pre_approval_verdict}\n\n'
        'Description: {description}\n'
        'Package: https://npmjs.com/package/{name}\n'
        'Repo: {repo}\n'
    ).format(
        licenses=details['licenses'],
        copyright_notice=details['copyright_notice'],
        dependencies=len(fourth_party_licenses),
        pre_approval_verdict=pre_approval_verdict,
        description=details.get('description') or 'N/A',
        name=details['name'],
        repo=details.get('repo') or 'N/A',
    ))
    # list every package that has at least one blocking reason
    problematic_licenses = [
        details for details in licenses
        if details['not_pre_approved_reasons']
    ]
    if problematic_licenses:
        heading = '\nProblematic Licenses: {0}'.format(len(problematic_licenses))
        click.secho(heading, bold=True, fg=color)
        missing = False
        for details in problematic_licenses:
            reasons = ', '.join(details['not_pre_approved_reasons'])
            missing = missing or 'missing' in reasons
            line = click.style('{name}@{version} ({licenses})'.format(**details), bold=True)
            click.echo('{0} - {1}'.format(line, reasons))
            if debug:
                # extra pointers to help hunt down the missing info
                click.echo(' ・ npm: https://www.npmjs.com/package/{0}'.format(details['name']))
                if details.get('repo'):
                    click.echo(' ・ repo: {0}'.format(details['repo']))
                if details.get('license_file'):
                    click.echo(' ・ license file: {0}'.format(details['license_file']))
                breadcrumbs = get_package_breadcrumbs(package_tree, details['name'], details['version'])
                if len(breadcrumbs) > 0:
                    for breadcrumb in breadcrumbs:
                        click.echo(' ・ found in dependency path: {}'.format(' > '.join(breadcrumb)))
        if missing:
            click.echo(
                '\nBad luck! Before adding the dependency to the approval '
                'process you need to manually go through the dependencies, '
                'get the missing info and complete the generated files '
                'with it.'
            )
        if not debug:
            click.echo('\nProTip: You can use --debug to print more details.')
    return pre_approval_verdict
def install(dep_name, dep_version, project_dir, dev=False):
    """Install dep_name@dep_version into project_dir with npm.

    With dev=True, the installed package's own manifest is promoted to the
    project root (trimmed to its *dependencies sections) and a second
    `npm install` pulls in the dev dependencies as well.
    """
    click.echo('Getting dependencies...')
    spec = '{0}@{1}'.format(dep_name, dep_version)
    manifest = os.path.join(project_dir, 'package.json')
    if not os.path.exists(manifest):
        # a package.json must exist so that `npm ls` works later on
        with open(manifest, 'w') as fp:
            fp.write('{"name": "black-belt"}')
    run(['npm', 'install', '--save', spec], cwd=project_dir)
    if not dev:
        return
    click.echo('Getting dev dependencies...')
    # promote the installed package's own package.json to the project root
    shutil.copy(
        os.path.join(project_dir, 'node_modules', dep_name, 'package.json'),
        os.path.join(project_dir),
    )
    manifest = os.path.join(project_dir, 'package.json')
    with open(manifest) as f:
        manifest_data = json.load(f)
    # keep only keys like dependencies/devDependencies/peerDependencies
    deps_only = {
        key: value
        for key, value in manifest_data.items()
        if 'dependencies' in key.lower()
    }
    with open(manifest, 'w') as f:
        json.dump(deps_only, f)
    run(['npm', 'install'], cwd=project_dir)
def get_pre_approval_verdict(licenses):
    """Return True when no dependency has any reason blocking pre-approval."""
    for details in licenses:
        if details['not_pre_approved_reasons']:
            return False
    return True
def create_deps_list(licenses):
    """Render one 'name@version (licenses)' line per dependency."""
    lines = []
    for details in licenses:
        lines.append('{name}@{version} ({licenses})'.format(**details))
    return '\n'.join(lines)
# See the internal FAQ and search for following questions:
#
# - What information is required in the Public License field?
# - What's the best way to format all the information in the Public
# License field?
# - If there are multiple dependencies licensed under the same terms,
# do I need to repeat those terms for every dependency?
def create_licenses_list(top_level_details, fourth_party_licenses):
    """Assemble the Public License field text.

    Starts with the top-level package's copyright notice and license text,
    then one section per 4th-party dependency. Dependencies that share the
    exact same license text are grouped so the text appears only once.
    """
    sections = [
        top_level_details['copyright_notice'],
        top_level_details['license_text'],
    ]
    by_text = operator.itemgetter('license_text')
    ordered = sorted(fourth_party_licenses, key=by_text)
    for license_text, group in itertools.groupby(ordered, by_text):
        group = list(group)
        if len(group) == 1:
            sections.append(
                '{name}@{version} ({licenses})\n'
                '{copyright_notice}\n'
                '{license_text}'.format(**group[0])
            )
        else:
            # one notice section per package, then the shared license text once
            for details in group:
                sections.append(
                    '{name}@{version} ({licenses})\n'
                    '{copyright_notice}'.format(**details)
                )
            names = ''.join('{name}@{version}\n'.format(**details)
                            for details in group)
            sections.append(names + '\n' + license_text)
    separator = '\n---------separator---------\n'
    return separator.join(sections) + separator
def separate_top_level_details(licenses, dep_name):
    """Split the license list into (top-level package details, 4th-party list).

    If several entries share dep_name, the last one wins (matches the
    original loop's overwrite behavior).
    """
    top_level = None
    for entry in licenses:
        if entry['name'] == dep_name:
            top_level = entry
    others = [entry for entry in licenses if entry['name'] != dep_name]
    return (top_level, others)
def license_checker(project_dir):
    """Run the npm `license-checker` tool on project_dir and normalize its
    per-package JSON output into a list of detail dicts (name, version,
    description, repo, license_file, copyright_notice, license_text,
    licenses, not_pre_approved_reasons).
    """
    # write the custom output format file license-checker expects
    format_file = os.path.join(project_dir, 'format.json')
    with open(format_file, 'w') as f:
        json.dump(LICENSE_CHECKER_FORMAT, f)
    args = ['--unknown', '--json', '--customPath', format_file]
    output = run(['license-checker'] + args, cwd=project_dir)
    licenses = []
    for details in json.loads(output).values():
        # read the package's own package.json for description/author fallbacks
        with open(os.path.join(details['path'], 'package.json')) as f:
            pkg_data = json.load(f)
        copyright_notice, license_text = parse_license_text(details.get('licenseText'))
        license_names = parse_license_names(details.get('licenses'))
        # rebuild `details` into our normalized schema
        details = {
            'name': details['name'],
            'version': details['version'],
            'description': pkg_data.get('description'),
            'repo': details.get('repository'),
            'license_file': parse_license_filename(details['name'], details.get('licenseFile')),
            'copyright_notice': copyright_notice or create_copyright_notice(pkg_data) or MISSING_COPYRIGHT_NOTICE_WARNING,
            'license_text': license_text or MISSING_LICENSE_TEXT_WARNING,
            'licenses': license_names,
        }
        details['not_pre_approved_reasons'] = check_pre_approval_elligibility(details)
        licenses.append(details)
    return licenses
def get_package_tree(project_dir):
    """Return the package's dependency "tree" parsed from `npm ls --json`.

    check=False because `npm ls` can exit non-zero (e.g. peer-dependency
    complaints) while presumably still printing usable JSON.
    """
    raw_json = run(['npm', 'ls', '--json'], cwd=project_dir, check=False)
    return json.loads(raw_json)
def get_package_breadcrumbs(package_tree, name, version):
    """
    Takes a npm ls JSON tree and looks up the paths to the given
    dependency (name and version).
    Returns an array of paths. Where a path is an array of
    dependencies leading to the given dependency in the tree.
    >>> get_package_breadcrumbs(tree, 'minim', '1.0.0')
    [
        ['fury-adapter-swagger@1.0.0'],
        ['fury@2.0.0'],
        ['apielements@0.1.0', 'fury@2.0.0']
    ]
    """
    def traverse_dependencies(dependencies, path):
        """
        Inline function to be called recursively to check for dependency and
        pass down the path to further dependencies.
        """
        results = []
        for dependency_name in dependencies:
            dependency = dependencies[dependency_name]
            if dependency_name == name and dependency.get('version') == version:
                # Found dependency in path
                results.append(path)
                continue
            # Robustness fix: use .get so nodes without a 'dependencies'
            # key don't raise KeyError.
            sub_dependencies = dependency.get('dependencies')
            if sub_dependencies:
                # Traverse dependency dependencies
                path_component = '{}@{}'.format(dependency_name, dependency['version'])
                results += traverse_dependencies(sub_dependencies, path + [path_component])
        return results
    # Robustness fix: an empty tree (e.g. failed `npm ls`) simply yields no paths.
    return traverse_dependencies(package_tree.get('dependencies', {}), [])
def ensure_executables(executables):
    """Verify each required executable runs, offering to npm-install any that
    are missing; aborts outright when npm itself is absent.

    NOTE(review): FileNotFoundError is Python-3 only, while a comment nearby
    says this module still supports Py2 — confirm.
    """
    for executable in executables:
        try:
            # check=False, because some programs return non-zero status when
            # they print --version output
            run([executable, '--version'], check=False)
        except FileNotFoundError:
            if executable != 'npm':
                confirm = (
                    "'{0}' is needed, but it's not installed. "
                    "Install by 'npm install -g {0}'?"
                )
                click.confirm(confirm.format(executable), abort=True)
                run(['npm', 'install', '-g', executable])
            else:
                msg = "'npm' is needed, but it's not installed."
                click.echo(msg, err=True)
                raise click.Abort()
# Unfortunately, we cannot use subprocess.run(), because BB still supports Py2
def run(args, cwd=None, check=True):
    """Run a command with stderr suppressed and return its stripped stdout.

    With check=True (default), a non-zero exit re-raises CalledProcessError;
    with check=False the command's captured stdout is returned anyway.
    """
    kwargs = {'cwd': cwd}
    try:
        with open(os.devnull, 'w') as devnull:
            kwargs['stderr'] = devnull
            output = subprocess.check_output(args, **kwargs)
    except subprocess.CalledProcessError as e:
        if check:
            raise
        else:
            # Py2-compat fix: CalledProcessError has only `output` on Py2;
            # `stdout` is a Py3-only alias of the same attribute.
            return e.output.decode().strip()
    else:
        return output.decode().strip()
def parse_license_names(value):
    """Normalize the license field to a plain string.

    Strings like '(MIT OR Apache-2.0)' lose their outer parentheses; the
    deprecated list-form 'licenses' field is joined with commas.
    """
    try:
        stripped = value.lstrip('(')
    except AttributeError:
        # Someone is using the deprecated 'licenses' field, we got a list.
        # Since we have no idea whether these licenses should be written
        # with OR, AND, ... let's join it by commas.
        #
        # See also https://docs.npmjs.com/files/package.json#license
        return ', '.join(filter(None, value))
    return stripped.rstrip(')')
def parse_license_text(text):
    """Split raw license text into (copyright_notice, license_text).

    Returns (None, None) when the text does not look like an actual license
    (no 'software'/'copyright'/'license' keyword), e.g. when license-checker
    picked up a README instead.
    """
    license_text = (text or '').strip()
    copyright_notice = detect_copyright_notice(license_text, require_year=True)
    # If the license doesn't contain any of the following words, it's suspicius
    # and should be classified as "rubbish" (sometimes the license-checker picks
    # up a README file without any real license text).
    license_text_lc = license_text.lower()
    if (
        'software' not in license_text_lc and
        'copyright' not in license_text_lc and
        'license' not in license_text_lc
    ):
        return (None, None)
    # Apache licenses are returned verbatim (reflowing would mangle them)
    if 'Apache License' in license_text:
        return (copyright_notice, license_text)
    # retry the notice detection without requiring a year this time
    copyright_notice = detect_copyright_notice(license_text)
    if copyright_notice:
        # keep only the text that follows the copyright notice
        license_text = text.split(copyright_notice)[1]
        license_text = license_text.strip()
    # collapse space runs and unwrap hard-wrapped lines, preserving paragraphs
    license_text = re.sub(r' +', ' ', license_text)
    license_text = re.sub(r' ?(\r\n|\n)+ ?', remove_newlines_keep_paragraps, license_text)
    return (copyright_notice, license_text)
def detect_copyright_notice(copyright_text, require_year=False):
for line |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.