import datetime as dt
import psycopg2
import pytest
import sqlalchemy as sa
from nycodex import db
from nycodex.db.queue import queue
def test_update_from_metadata_empty(conn):
query = sa.select([sa.func.count()]).select_from(queue)
assert 0 == conn.execute(query).fetchone()[0]
db.queue.update_from_metadata(conn)
assert 0 == conn.execute(query).fetchone()[0]
def dataset_factory(id: str) -> db.Dataset:
return db.Dataset(
id=id,
owner_id=id,
description="",
is_official=True,
name="test1",
page_views_total=100,
page_views_last_week=1,
page_views_last_month=10,
categories=[],
domain_category=db.DomainCategory.BUSINESS.value,
asset_type=db.AssetType.CALENDAR.value,
created_at=dt.datetime(2017, 1, 1, 11, 30, 0),
updated_at=dt.datetime(2017, 1, 5, 11, 30, 0),
is_auto_updated=True,
parents=[],
domain_tags=[],
column_names=[],
column_field_names=[],
column_sql_names=[],
column_types=[],
column_descriptions=[])
@pytest.fixture
def fake_dataset(session):
dataset = dataset_factory("abcd-0000")
session.add(dataset)
session.commit()
yield dataset.id
def test_update_from_metadata(conn, session):
datasets = [
dataset_factory("abcd-0000"),
dataset_factory("abcd-0001"),
dataset_factory("abcd-0002"),
dataset_factory("abcd-0003"),
]
datasets[0].asset_type = db.AssetType.DATASET.value
datasets[1].asset_type = db.AssetType.MAP.value
datasets[2].asset_type = db.AssetType.CALENDAR.value
datasets[3].asset_type = db.AssetType.CALENDAR.value
session.add_all(datasets)
session.commit()
query = sa.select([
queue.c.dataset_id, queue.c.updated_at, queue.c.scraped_at,
queue.c.processed_at, queue.c.retries
])
orig = dt.datetime(2017, 1, 4, 11, 0, 0)
new = dt.datetime(2017, 1, 5, 11, 30, 0) # line up with factory
conn.execute(queue.insert().values(
dataset_id="abcd-0000", updated_at=orig, scraped_at=orig, retries=1))
db.queue.update_from_metadata(conn)
assert conn.execute(query).fetchall() == [
("abcd-0000", new, orig, None, 0),
("abcd-0001", new, None, None, 0),
]
def test_update_from_metadata_future(conn, session):
dataset = dataset_factory("abcd-0000")
dataset.asset_type = db.AssetType.DATASET.value
dataset.updated_at = dt.datetime.now() + dt.timedelta(days=10)
session.add(dataset)
session.commit()
db.queue.update_from_metadata(conn)
assert [("abcd-0000", None, None, 0)] == conn.execute(
sa.select([
queue.c.dataset_id, queue.c.scraped_at, queue.c.processed_at,
queue.c.retries
])).fetchall() == [("abcd-0000", None, None, 0)]
updated_at = conn.execute(sa.select([queue.c.updated_at])).fetchone()[0]
assert updated_at < dt.datetime.now() + dt.timedelta(minutes=1)
def test_next_row_to_scrape_empty(conn):
with db.queue.next_row_to_scrape(conn) as (conn, dataset_id):
assert conn is None
assert dataset_id is None
def test_next_row_to_scrape(conn, fake_dataset):
start = dt.datetime.now()
orig = start - dt.timedelta(days=2)
conn.execute(queue.insert().values(
dataset_id=fake_dataset, retries=0, updated_at=orig))
query = sa.select([
queue.c.dataset_id, queue.c.updated_at, queue.c.scraped_at,
queue.c.processed_at, queue.c.retries
])
# On error, increment `retries` while leaving `updated_at` alone
with pytest.raises(ZeroDivisionError):
with db.queue.next_row_to_scrape(conn) as (c, dataset_id):
assert c is not None
assert dataset_id == "abcd-0000"
raise ZeroDivisionError
assert conn.execute(query).fetchone() == ("abcd-0000", orig, None, None, 1)
# On success, reset `retries` and update `scraped_at`
with db.queue.next_row_to_scrape(conn) as (c, dataset_id):
assert c is not None
assert dataset_id == fake_dataset
row = conn.execute(query).fetchone()
assert row.dataset_id == fake_dataset
assert row.updated_at == orig
assert row.processed_at is None
assert row.retries == 0
# Add 1 minute buffer for clock mismatch issues
assert start - dt.timedelta(minutes=1) < row.scraped_at
assert row.scraped_at - dt.timedelta(minutes=1) < dt.datetime.now()
# Nothing left in the queue
with db.queue.next_row_to_scrape(conn) as (c, dataset_id):
assert c is None
assert dataset_id is None
def test_next_row_to_process(conn, fake_dataset):
start = dt.datetime.now()
original = start - dt.timedelta(days=2)
conn.execute(queue.insert().values(
dataset_id=fake_dataset, retries=0, updated_at=original))
query = sa.select([
queue.c.dataset_id, queue.c.updated_at, queue.c.scraped_at,
queue.c.processed_at, queue.c.retries
])
# Without a scraped dataset, don't do anything
with db.queue.next_row_to_process(conn) as (c, dataset_id):
assert c is None
assert dataset_id is None
conn.execute(queue.update().where(
queue.c.dataset_id == fake_dataset).values(scraped_at=original))
# On success, reset `retries` and update `processed_at`
with db.queue.next_row_to_process(conn) as (__, dataset_id):
assert dataset_id == fake_dataset
row = conn.execute(query).fetchone()
assert row.dataset_id == fake_dataset
# Add 1 minute buffer for clock mismatch issues
assert start - dt.timedelta(minutes=1) < row.processed_at
assert row.processed_at - dt.timedelta(minutes=1) < dt.datetime.now()
assert row.retries == 0
# Nothing left in the queue
with db.queue.next_row_to_process(conn) as (c, dataset_id):
assert c is None
assert dataset_id is None
def test_db_failure(engine):
Session = sa.orm.sessionmaker(bind=engine)
with engine.connect() as conn:
session = Session(bind=conn)
trans = conn.begin()
dataset = dataset_factory("abcd-0000")
session.add(dataset)
session.commit()
conn.execute(queue.insert().values(
dataset_id=dataset.id,
retries=0,
updated_at=dt.datetime.now() - dt.timedelta(days=1)))
with pytest.raises(sa.exc.InternalError):
with db.queue.next_row_to_scrape(conn) as (c, dataset_id):
assert c is not None
with pytest.raises(psycopg2.ProgrammingError):
conn.execute("SELECT * FROM non_existent_datbase")
trans.rollback()
assert conn.closed
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Csiszar Divergence Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.bayesflow.python.ops import csiszar_divergence_impl
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_diag_lib
from tensorflow.contrib.distributions.python.ops import mvn_full_covariance as mvn_full_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
cd = csiszar_divergence_impl
def tridiag(d, diag_value, offdiag_value):
"""d x d matrix with given value on diag, and one super/sub diag."""
diag_mat = linalg_ops.eye(d) * (diag_value - offdiag_value)
three_bands = array_ops.matrix_band_part(
array_ops.fill([d, d], offdiag_value), 1, 1)
return diag_mat + three_bands
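# Illustrative example (added commentary, not part of the original test): with the helper
# above, tridiag(3, 1., 0.5) evaluates to
#   [[1. , 0.5, 0. ],
#    [0.5, 1. , 0.5],
#    [0. , 0.5, 1. ]]
# i.e. `diag_value` on the main diagonal and `offdiag_value` on the first super- and
# sub-diagonals, with zeros elsewhere.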
class AmariAlphaTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
for alpha in [-1., 0., 1., 2.]:
for normalized in [True, False]:
with self.test_session(graph=ops.Graph()):
self.assertAllClose(
cd.amari_alpha(0., alpha=alpha,
self_normalized=normalized).eval(),
0.)
def test_correct_when_alpha0(self):
with self.test_session():
self.assertAllClose(
cd.amari_alpha(self._logu, alpha=0.).eval(),
-self._logu)
self.assertAllClose(
cd.amari_alpha(self._logu, alpha=0., self_normalized=True).eval(),
-self._logu + (self._u - 1.))
def test_correct_when_alpha1(self):
with self.test_session():
self.assertAllClose(
cd.amari_alpha(self._logu, alpha=1.).eval(),
self._u * self._logu)
self.assertAllClose(
cd.amari_alpha(self._logu, alpha=1., self_normalized=True).eval(),
self._u * self._logu - (self._u - 1.))
def test_correct_when_alpha_not_01(self):
for alpha in [-2, -1., -0.5, 0.5, 2.]:
with self.test_session(graph=ops.Graph()):
self.assertAllClose(
cd.amari_alpha(self._logu,
alpha=alpha,
self_normalized=False).eval(),
((self._u**alpha - 1)) / (alpha * (alpha - 1.)))
self.assertAllClose(
cd.amari_alpha(self._logu,
alpha=alpha,
self_normalized=True).eval(),
((self._u**alpha - 1.)
- alpha * (self._u - 1)) / (alpha * (alpha - 1.)))
class KLReverseTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
for normalized in [True, False]:
with self.test_session(graph=ops.Graph()):
self.assertAllClose(
cd.kl_reverse(0., self_normalized=normalized).eval(),
0.)
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.kl_reverse(self._logu).eval(),
-self._logu)
self.assertAllClose(
cd.kl_reverse(self._logu, self_normalized=True).eval(),
-self._logu + (self._u - 1.))
class KLForwardTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
for normalized in [True, False]:
with self.test_session(graph=ops.Graph()):
self.assertAllClose(
cd.kl_forward(0., self_normalized=normalized).eval(),
0.)
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.kl_forward(self._logu).eval(),
self._u * self._logu)
self.assertAllClose(
cd.kl_forward(self._logu, self_normalized=True).eval(),
self._u * self._logu - (self._u - 1.))
class JensenShannonTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.jensen_shannon(0.).eval(), np.log(0.25))
def test_symmetric(self):
with self.test_session():
self.assertAllClose(
cd.jensen_shannon(self._logu).eval(),
cd.symmetrized_csiszar_function(
self._logu, cd.jensen_shannon).eval())
self.assertAllClose(
cd.jensen_shannon(self._logu, self_normalized=True).eval(),
cd.symmetrized_csiszar_function(
self._logu,
lambda x: cd.jensen_shannon(x, self_normalized=True)).eval())
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.jensen_shannon(self._logu).eval(),
(self._u * self._logu
- (1 + self._u) * np.log1p(self._u)))
self.assertAllClose(
cd.jensen_shannon(self._logu, self_normalized=True).eval(),
(self._u * self._logu
- (1 + self._u) * np.log((1 + self._u) / 2)))
class ArithmeticGeometricMeanTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.arithmetic_geometric(0.).eval(), np.log(4))
self.assertAllClose(
cd.arithmetic_geometric(0., self_normalized=True).eval(), 0.)
def test_symmetric(self):
with self.test_session():
self.assertAllClose(
cd.arithmetic_geometric(self._logu).eval(),
cd.symmetrized_csiszar_function(
self._logu, cd.arithmetic_geometric).eval())
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.arithmetic_geometric(self._logu).eval(),
(1. + self._u) * np.log((1. + self._u) / np.sqrt(self._u)))
self.assertAllClose(
cd.arithmetic_geometric(self._logu, self_normalized=True).eval(),
(1. + self._u) * np.log(0.5 * (1. + self._u) / np.sqrt(self._u)))
class TotalVariationTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.total_variation(0.).eval(), 0.)
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.total_variation(self._logu).eval(),
0.5 * np.abs(self._u - 1))
class PearsonTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.pearson(0.).eval(), 0.)
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.pearson(self._logu).eval(),
np.square(self._u - 1))
class SquaredHellingerTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.squared_hellinger(0.).eval(), 0.)
def test_symmetric(self):
with self.test_session():
self.assertAllClose(
cd.squared_hellinger(self._logu).eval(),
cd.symmetrized_csiszar_function(
self._logu, cd.squared_hellinger).eval())
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.squared_hellinger(self._logu).eval(),
np.square(np.sqrt(self._u) - 1))
class TriangularTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.triangular(0.).eval(), 0.)
def test_symmetric(self):
with self.test_session():
self.assertAllClose(
cd.triangular(self._logu).eval(),
cd.symmetrized_csiszar_function(
self._logu, cd.triangular).eval())
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.triangular(self._logu).eval(),
np.square(self._u - 1) / (1 + self._u))
class TPowerTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.t_power(0., t=-0.1).eval(), 0.)
self.assertAllClose(cd.t_power(0., t=0.5).eval(), 0.)
self.assertAllClose(cd.t_power(0., t=1.1).eval(), 0.)
self.assertAllClose(
cd.t_power(0., t=-0.1, self_normalized=True).eval(), 0.)
self.assertAllClose(
cd.t_power(0., t=0.5, self_normalized=True).eval(), 0.)
self.assertAllClose(
cd.t_power(0., t=1.1, self_normalized=True).eval(), 0.)
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.t_power(self._logu, t=np.float64(-0.1)).eval(),
self._u ** -0.1 - 1.)
self.assertAllClose(
cd.t_power(self._logu, t=np.float64(0.5)).eval(),
-self._u ** 0.5 + 1.)
self.assertAllClose(
cd.t_power(self._logu, t=np.float64(1.1)).eval(),
self._u ** 1.1 - 1.)
def test_correct_self_normalized(self):
with self.test_session():
self.assertAllClose(
cd.t_power(self._logu, t=np.float64(-0.1),
self_normalized=True).eval(),
self._u ** -0.1 - 1. + 0.1 * (self._u - 1.))
self.assertAllClose(
cd.t_power(self._logu, t=np.float64(0.5),
self_normalized=True).eval(),
-self._u ** 0.5 + 1. + 0.5 * (self._u - 1.))
self.assertAllClose(
cd.t_power(self._logu, t=np.float64(1.1),
self_normalized=True).eval(),
self._u ** 1.1 - 1. - 1.1 * (self._u - 1.))
class Log1pAbsTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.log1p_abs(0.).eval(), 0.)
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.log1p_abs(self._logu).eval(),
self._u**(np.sign(self._u - 1)) - 1)
class JeffreysTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.jeffreys(0.).eval(), 0.)
def test_symmetric(self):
with self.test_session():
self.assertAllClose(
cd.jeffreys(self._logu).eval(),
cd.symmetrized_csiszar_function(
self._logu, cd.jeffreys).eval())
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.jeffreys(self._logu).eval(),
0.5 * (self._u * self._logu - self._logu))
class ChiSquareTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(cd.chi_square(0.).eval(), 0.)
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.chi_square(self._logu).eval(),
self._u**2 - 1)
class ModifiedGanTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10, 100)
self._u = np.exp(self._logu)
def test_at_zero(self):
with self.test_session():
self.assertAllClose(
cd.modified_gan(0.).eval(), np.log(2))
self.assertAllClose(
cd.modified_gan(0., self_normalized=True).eval(), np.log(2))
def test_correct(self):
with self.test_session():
self.assertAllClose(
cd.modified_gan(self._logu).eval(),
np.log1p(self._u) - self._logu)
self.assertAllClose(
cd.modified_gan(self._logu, self_normalized=True).eval(),
np.log1p(self._u) - self._logu + 0.5 * (self._u - 1))
class SymmetrizedCsiszarFunctionTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10., 100)
self._u = np.exp(self._logu)
def test_jensen_shannon(self):
with self.test_session():
# The following functions come from the claim made in the
# symmetrized_csiszar_function docstring.
def js1(logu):
return (-logu
- (1. + math_ops.exp(logu)) * (
nn_ops.softplus(logu)))
def js2(logu):
return 2. * (math_ops.exp(logu) * (
logu - nn_ops.softplus(logu)))
self.assertAllClose(
cd.symmetrized_csiszar_function(self._logu, js1).eval(),
cd.jensen_shannon(self._logu).eval())
self.assertAllClose(
cd.symmetrized_csiszar_function(self._logu, js2).eval(),
cd.jensen_shannon(self._logu).eval())
def test_jeffreys(self):
with self.test_session():
self.assertAllClose(
cd.symmetrized_csiszar_function(self._logu, cd.kl_reverse).eval(),
cd.jeffreys(self._logu).eval())
self.assertAllClose(
cd.symmetrized_csiszar_function(self._logu, cd.kl_forward).eval(),
cd.jeffreys(self._logu).eval())
class DualCsiszarFunctionTest(test.TestCase):
def setUp(self):
self._logu = np.linspace(-10., 10., 100)
self._u = np.exp(self._logu)
def test_kl_forward(self):
with self.test_session():
self.assertAllClose(
cd.dual_csiszar_function(self._logu, cd.kl_forward).eval(),
cd.kl_reverse(self._logu).eval())
def test_kl_reverse(self):
with self.test_session():
self.assertAllClose(
cd.dual_csiszar_function(self._logu, cd.kl_reverse).eval(),
cd.kl_forward(self._logu).eval())
class MonteCarloCsiszarFDivergenceTest(test.TestCase):
def test_kl_forward(self):
with self.test_session() as sess:
q = normal_lib.Normal(
loc=np.ones(6),
scale=np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0]))
p = normal_lib.Normal(loc=q.loc + 0.1, scale=q.scale - 0.2)
approx_kl = cd.monte_carlo_csiszar_f_divergence(
f=cd.kl_forward,
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
f=lambda logu: cd.kl_forward(logu, self_normalized=True),
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
exact_kl = kullback_leibler.kl_divergence(p, q)
[approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
approx_kl, approx_kl_self_normalized, exact_kl])
self.assertAllClose(approx_kl_, exact_kl_,
rtol=0.08, atol=0.)
self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
rtol=0.02, atol=0.)
def test_kl_reverse(self):
with self.test_session() as sess:
q = normal_lib.Normal(
loc=np.ones(6),
scale=np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0]))
p = normal_lib.Normal(loc=q.loc + 0.1, scale=q.scale - 0.2)
approx_kl = cd.monte_carlo_csiszar_f_divergence(
f=cd.kl_reverse,
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
exact_kl = kullback_leibler.kl_divergence(q, p)
[approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
approx_kl, approx_kl_self_normalized, exact_kl])
self.assertAllClose(approx_kl_, exact_kl_,
rtol=0.07, atol=0.)
self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
rtol=0.02, atol=0.)
def test_kl_reverse_multidim(self):
with self.test_session() as sess:
d = 5 # Dimension
p = mvn_full_lib.MultivariateNormalFullCovariance(
covariance_matrix=tridiag(d, diag_value=1, offdiag_value=0.5))
q = mvn_diag_lib.MultivariateNormalDiag(scale_diag=[0.5]*d)
approx_kl = cd.monte_carlo_csiszar_f_divergence(
f=cd.kl_reverse,
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
exact_kl = kullback_leibler.kl_divergence(q, p)
[approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
approx_kl, approx_kl_self_normalized, exact_kl])
self.assertAllClose(approx_kl_, exact_kl_,
rtol=0.02, atol=0.)
self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
rtol=0.08, atol=0.)
def test_kl_forward_multidim(self):
with self.test_session() as sess:
d = 5 # Dimension
p = mvn_full_lib.MultivariateNormalFullCovariance(
covariance_matrix=tridiag(d, diag_value=1, offdiag_value=0.5))
# Variance is very high when approximating Forward KL, so we make
# scale_diag larger than in test_kl_reverse_multidim. This ensures q
# "covers" p and thus Var_q[p/q] is smaller.
q = mvn_diag_lib.MultivariateNormalDiag(scale_diag=[1.]*d)
approx_kl = cd.monte_carlo_csiszar_f_divergence(
f=cd.kl_forward,
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
f=lambda logu: cd.kl_forward(logu, self_normalized=True),
p_log_prob=p.log_prob,
q=q,
num_draws=int(1e5),
seed=1)
exact_kl = kullback_leibler.kl_divergence(p, q)
[approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
approx_kl, approx_kl_self_normalized, exact_kl])
self.assertAllClose(approx_kl_, exact_kl_,
rtol=0.06, atol=0.)
self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
rtol=0.05, atol=0.)
def test_score_trick(self):
with self.test_session() as sess:
d = 5 # Dimension
num_draws = int(1e5)
seed = 1
p = mvn_full_lib.MultivariateNormalFullCovariance(
covariance_matrix=tridiag(d, diag_value=1, offdiag_value=0.5))
# Variance is very high when approximating Forward KL, so we make
# scale_diag larger than in test_kl_reverse_multidim. This ensures q
# "covers" p and thus Var_q[p/q] is smaller.
s = array_ops.constant(1.)
q = mvn_diag_lib.MultivariateNormalDiag(
scale_diag=array_ops.tile([s], [d]))
approx_kl = cd.monte_carlo_csiszar_f_divergence(
f=cd.kl_reverse,
p_log_prob=p.log_prob,
q=q,
num_draws=num_draws,
seed=seed)
approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
p_log_prob=p.log_prob,
q=q,
num_draws=num_draws,
seed=seed)
approx_kl_score_trick = cd.monte_carlo_csiszar_f_divergence(
f=cd.kl_reverse,
p_log_prob=p.log_prob,
q=q,
num_draws=num_draws,
use_reparametrization=False,
seed=seed)
approx_kl_self_normalized_score_trick = (
cd.monte_carlo_csiszar_f_divergence(
f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
p_log_prob=p.log_prob,
q=q,
num_draws=num_draws,
use_reparametrization=False,
seed=seed))
exact_kl = kullback_leibler.kl_divergence(q, p)
grad_sum = lambda fs: gradients_impl.gradients(fs, s)[0]
[
approx_kl_grad_,
approx_kl_self_normalized_grad_,
approx_kl_score_trick_grad_,
approx_kl_self_normalized_score_trick_grad_,
exact_kl_grad_,
approx_kl_,
approx_kl_self_normalized_,
approx_kl_score_trick_,
approx_kl_self_normalized_score_trick_,
exact_kl_,
] = sess.run([
grad_sum(approx_kl),
grad_sum(approx_kl_self_normalized),
grad_sum(approx_kl_score_trick),
grad_sum(approx_kl_self_normalized_score_trick),
grad_sum(exact_kl),
approx_kl,
approx_kl_self_normalized,
approx_kl_score_trick,
approx_kl_self_normalized_score_trick,
exact_kl,
])
# Test average divergence.
self.assertAllClose(approx_kl_, exact_kl_,
rtol=0.02, atol=0.)
self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
rtol=0.08, atol=0.)
self.assertAllClose(approx_kl_score_trick_, exact_kl_,
rtol=0.02, atol=0.)
self.assertAllClose(approx_kl_self_normalized_score_trick_, exact_kl_,
rtol=0.08, atol=0.)
# Test average gradient-divergence.
self.assertAllClose(approx_kl_grad_, exact_kl_grad_,
rtol=0.007, atol=0.)
self.assertAllClose(approx_kl_self_normalized_grad_, exact_kl_grad_,
rtol=0.011, atol=0.)
self.assertAllClose(approx_kl_score_trick_grad_, exact_kl_grad_,
rtol=0.018, atol=0.)
self.assertAllClose(
approx_kl_self_normalized_score_trick_grad_, exact_kl_grad_,
rtol=0.017, atol=0.)
class CsiszarVIMCOTest(test.TestCase):
def _numpy_csiszar_vimco_helper(self, logu):
"""Numpy implementation of `csiszar_vimco_helper`."""
n = logu.shape[0]
u = np.exp(logu)
loogeoavg_u = [] # Leave-one-out geometric-average of exp(logu).
for j in range(n):
loogeoavg_u.append(np.exp(np.mean(
[logu[i, ...] for i in range(n) if i != j],
axis=0)))
loogeoavg_u = np.array(loogeoavg_u)
loosum_u = [] # Leave-one-out sum of exp(logu).
for j in range(n):
loosum_u.append(np.sum(
[u[i, ...] for i in range(n) if i != j],
axis=0))
loosum_u = np.array(loosum_u)
# Natural log of the average u except each is swapped-out for its
# leave-`i`-th-out Geometric average.
log_sooavg_u = np.log(loosum_u + loogeoavg_u) - np.log(n)
log_avg_u = np.log(np.mean(u, axis=0))
return log_avg_u, log_sooavg_u
def test_vimco_helper(self):
with self.test_session() as sess:
logu = np.linspace(-20, 20, 100)
np_log_avg_u, np_log_sooavg_u = self._numpy_csiszar_vimco_helper(logu)
[log_avg_u, log_sooavg_u] = sess.run(cd.csiszar_vimco_helper(logu))
self.assertAllClose(np_log_avg_u, log_avg_u,
rtol=1e-2, atol=0.)
self.assertAllClose(np_log_sooavg_u, log_sooavg_u,
rtol=1e-2, atol=0.)
def test_vimco_helper_gradient(self):
with self.test_session():
logu = array_ops.constant(
np.linspace(-1e2, 100., 100).reshape([50, 2]))
log_avg_u, log_sooavg_u = cd.csiszar_vimco_helper(logu)
g = gradients_impl.gradients(log_avg_u - log_sooavg_u, logu)[0].eval()
self.assertAllEqual(np.ones_like(g, dtype=np.bool), np.isfinite(g))
self.assertAllEqual(np.ones_like(g, dtype=np.bool), g != 0.)
def test_vimco_and_gradient(self):
with self.test_session() as sess:
dims = 5 # Dimension
num_draws = int(20)
num_batch_draws = int(3)
seed = 1
f = lambda logu: cd.kl_reverse(logu, self_normalized=False)
np_f = lambda logu: -logu
p = mvn_full_lib.MultivariateNormalFullCovariance(
covariance_matrix=tridiag(dims, diag_value=1, offdiag_value=0.5))
# Variance is very high when approximating Forward KL, so we make
# scale_diag larger than in test_kl_reverse_multidim. This ensures q
# "covers" p and thus Var_q[p/q] is smaller.
s = array_ops.constant(1.)
q = mvn_diag_lib.MultivariateNormalDiag(
scale_diag=array_ops.tile([s], [dims]))
vimco = cd.csiszar_vimco(
f=f,
p_log_prob=p.log_prob,
q=q,
num_draws=num_draws,
num_batch_draws=num_batch_draws,
seed=seed)
x = q.sample(sample_shape=[num_draws, num_batch_draws],
seed=seed)
x = array_ops.stop_gradient(x)
logu = p.log_prob(x) - q.log_prob(x)
f_log_sum_u = f(cd.csiszar_vimco_helper(logu)[0])
grad_sum = lambda fs: gradients_impl.gradients(fs, s)[0]
def jacobian(x):
# Warning: this function is slow and may not even finish if prod(shape)
# is larger than, say, 100.
shape = x.shape.as_list()
assert all(s is not None for s in shape)
x = array_ops.reshape(x, shape=[-1])
r = [grad_sum(x[i]) for i in range(np.prod(shape))]
return array_ops.reshape(array_ops.stack(r), shape=shape)
[
logu_,
jacobian_logqx_,
vimco_,
grad_vimco_,
f_log_sum_u_,
grad_mean_f_log_sum_u_,
] = sess.run([
logu,
jacobian(q.log_prob(x)),
vimco,
grad_sum(vimco),
f_log_sum_u,
grad_sum(f_log_sum_u) / num_batch_draws,
])
np_log_avg_u, np_log_sooavg_u = self._numpy_csiszar_vimco_helper(logu_)
# Test VIMCO loss is correct.
self.assertAllClose(np_f(np_log_avg_u).mean(axis=0), vimco_,
rtol=1e-5, atol=0.)
# Test gradient of VIMCO loss is correct.
#
# To make this computation we'll inject two gradients from TF:
# - grad[mean(f(log(sum(p(x)/q(x)))))]
# - jacobian[log(q(x))].
#
# We now justify why using these (and only these) TF values for
# ground-truth does not undermine the completeness of this test.
#
# Regarding `grad_mean_f_log_sum_u_`, note that we validate the
# correctness of the zero-th order derivative (for each batch member).
# Since `cd.csiszar_vimco_helper` itself does not manipulate any gradient
# information, we can safely rely on TF.
self.assertAllClose(np_f(np_log_avg_u), f_log_sum_u_, rtol=1e-4, atol=0.)
#
# Regarding `jacobian_logqx_`, note that testing the gradient of
# `q.log_prob` is outside the scope of this unit-test thus we may safely
# use TF to find it.
# The `mean` is across batches and the `sum` is across iid samples.
np_grad_vimco = (
grad_mean_f_log_sum_u_
+ np.mean(
np.sum(
jacobian_logqx_ * (np_f(np_log_avg_u)
- np_f(np_log_sooavg_u)),
axis=0),
axis=0))
self.assertAllClose(np_grad_vimco, grad_vimco_,
rtol=1e-5, atol=0.)
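# Added commentary (not in the original test): informally, the check above verifies
#   grad[vimco] ~= grad[ mean_b f(log_avg_u_b) ]
#                  + mean_b sum_i d/ds[log q(x_ib)] * ( f(log_avg_u)_b - f(log_sooavg_u)_ib )
# where u_ib = p(x_ib) / q(x_ib), `log_avg_u` is the log of the per-batch sample average of u,
# and `log_sooavg_u` replaces each u_ib by its leave-one-out geometric mean before averaging.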
if __name__ == "__main__":
test.main()
#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006 (dv)
# Tamas Pal, 2007 (folti)
# Nicolas Mercier, 2009
# Matt Clarkson, 2012
"""
Microsoft Visual C++/Intel C++ compiler support
Usage::
$ waf configure --msvc_version="msvc 10.0,msvc 9.0" --msvc_targets="x64"
or::
def configure(conf):
conf.env['MSVC_VERSIONS'] = ['msvc 10.0', 'msvc 9.0', 'msvc 8.0', 'msvc 7.1', 'msvc 7.0', 'msvc 6.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0']
conf.env['MSVC_TARGETS'] = ['x64']
conf.load('msvc')
or::
def configure(conf):
conf.load('msvc', funs='no_autodetect')
conf.check_lib_msvc('gdi32')
conf.check_libs_msvc('kernel32 user32')
def build(bld):
tg = bld.program(source='main.c', target='app', use='KERNEL32 USER32 GDI32')
Platforms and targets will be tested in the order they appear;
the first good configuration will be used.
Supported platforms: ia64, x64, x86, x86_amd64, x86_ia64
Compilers supported:
* msvc => Visual Studio, versions 6.0 (VC 98, VC .NET 2002) to 11.0 (Visual Studio 2012)
* wsdk => Windows SDK, versions 6.0, 6.1, 7.0, 7.1, 8.0
* icl => Intel compiler, versions 9, 10, 11, 13
* winphone => Visual Studio to target Windows Phone 8 native (version 8.0 for now)
* Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i)
* PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i)
To use WAF in a VS2008 Make file project (see http://code.google.com/p/waf/issues/detail?id=894),
consider setting the environment variable "VS_UNICODE_OUTPUT" to nothing before calling waf.
In your project settings use something like 'cmd.exe /C "set VS_UNICODE_OUTPUT=& set PYTHONUNBUFFERED=true & waf build"',
or, for configuring: cmd.exe /C "chcp 1252 & set PYTHONUNBUFFERED=true && set && waf configure"
Setting PYTHONUNBUFFERED gives unbuffered output.
"""
import os, sys, re, tempfile
from waflib import Utils, Task, Logs, Options, Errors
from waflib.Logs import debug, warn
from waflib.TaskGen import after_method, feature
from waflib.Configure import conf
from waflib.Tools import ccroot, c, cxx, ar, winres
g_msvc_systemlibs = '''
aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet
cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs
credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d
ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp
faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glaux glu32 gpedit gpmuuid
gtrts32w gtrtst32 hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop
kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi
mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree
msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm
netapi32 nmapi nmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp
odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlg olepro32 opends60 opengl32
osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu
ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm
rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32
shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32
traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg
version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm
wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp
'''.split()
"""importlibs provided by MSVC/Platform SDK. Do NOT search them"""
all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64'), ('x86_arm', 'arm') ]
"""List of msvc platforms"""
all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ]
"""List of wince platforms"""
all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')]
"""List of icl platforms"""
def options(opt):
opt.add_option('--msvc_version', type='string', help = 'msvc version, eg: "msvc 10.0,msvc 9.0"', default='')
opt.add_option('--msvc_targets', type='string', help = 'msvc targets, eg: "x64,arm"', default='')
def setup_msvc(conf, versions, arch = False):
platforms = getattr(Options.options, 'msvc_targets', '').split(',')
if platforms == ['']:
platforms=Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms]
desired_versions = getattr(Options.options, 'msvc_version', '').split(',')
if desired_versions == ['']:
desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1]
versiondict = dict(versions)
for version in desired_versions:
try:
targets = dict(versiondict [version])
for target in platforms:
try:
arch,(p1,p2,p3) = targets[target]
compiler,revision = version.rsplit(' ', 1)
if arch:
return compiler,revision,p1,p2,p3,arch
else:
return compiler,revision,p1,p2,p3
except KeyError: continue
except KeyError: continue
conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
@conf
def get_msvc_version(conf, compiler, version, target, vcvars):
"""
Create a bat file to obtain the location of the libraries
:param compiler: compiler type, for example 'msvc'
:param version: compiler version, for example '10.0'
:param target: target architecture, for example 'x86'
:param vcvars: batch file to run in order to set up the environment (e.g. vcvarsall.bat)
:return: the location of msvc, the location of include dirs, and the library paths
:rtype: tuple of strings
"""
debug('msvc: get_msvc_version: %r %r %r', compiler, version, target)
batfile = conf.bldnode.make_node('waf-print-msvc.bat')
batfile.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%;%%LIBPATH%%
""" % (vcvars,target))
sout = conf.cmd_and_log(['cmd', '/E:on', '/V:on', '/C', batfile.abspath()])
lines = sout.splitlines()
if not lines[0]:
lines.pop(0)
MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None
for line in lines:
if line.startswith('PATH='):
path = line[5:]
MSVC_PATH = path.split(';')
elif line.startswith('INCLUDE='):
MSVC_INCDIR = [i for i in line[8:].split(';') if i]
elif line.startswith('LIB='):
MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR):
conf.fatal('msvc: Could not find a valid architecture for building (get_msvc_version_3)')
# Check if the compiler is usable at all.
# The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
env = dict(os.environ)
env.update(PATH = path)
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
cxx = conf.find_program(compiler_name, path_list=MSVC_PATH)
cxx = conf.cmd_to_list(cxx)
# delete CL if it exists, because it could contain parameters which can change cl's behaviour rather catastrophically.
if 'CL' in env:
del(env['CL'])
try:
try:
conf.cmd_and_log(cxx + ['/help'], env=env)
except Exception as e:
debug('msvc: get_msvc_version: %r %r %r -> failure' % (compiler, version, target))
debug(str(e))
conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
else:
debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target)
finally:
conf.env[compiler_name] = ''
return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
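# Sketch of the output parsed above (illustrative only; paths are invented): the generated
# bat file echoes three lines of the form
#   PATH=C:\...\VC\BIN;C:\...\Common7\IDE;...
#   INCLUDE=C:\...\VC\INCLUDE;...
#   LIB=C:\...\VC\LIB;C:\...\SDK\LIB;...
# and MSVC_PATH / MSVC_INCDIR / MSVC_LIBDIR are simply those values split on ';'.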
@conf
def gather_wsdk_versions(conf, versions):
"""
Use winreg to add the msvc versions to the input list
:param versions: list to modify
:type versions: list
"""
version_pattern = re.compile('^v..?.?\...?.?')
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
return
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
try:
msvc_version = Utils.winreg.OpenKey(all_versions, version)
path,type = Utils.winreg.QueryValueEx(msvc_version,'InstallationFolder')
except WindowsError:
continue
if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')):
targets = []
for target,arch in all_msvc_platforms:
try:
targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')))))
except conf.errors.ConfigurationError:
pass
versions.append(('wsdk ' + version[1:], targets))
def gather_wince_supported_platforms():
"""
Checks SmartPhone SDKs
:return: list of supported WinCE platforms (possibly empty)
"""
supported_wince_platforms = []
try:
ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
try:
ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
ce_sdk = ''
if not ce_sdk:
return supported_wince_platforms
ce_index = 0
while 1:
try:
sdk_device = Utils.winreg.EnumKey(ce_sdk, ce_index)
except WindowsError:
break
ce_index = ce_index + 1
sdk = Utils.winreg.OpenKey(ce_sdk, sdk_device)
try:
path,type = Utils.winreg.QueryValueEx(sdk, 'SDKRootDir')
except WindowsError:
try:
path,type = Utils.winreg.QueryValueEx(sdk,'SDKInformation')
path,xml = os.path.split(path)
except WindowsError:
continue
path=str(path)
path,device = os.path.split(path)
if not device:
path,device = os.path.split(path)
for arch,compiler in all_wince_platforms:
platforms = []
if os.path.isdir(os.path.join(path, device, 'Lib', arch)):
platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch)))
if platforms:
supported_wince_platforms.append((device, platforms))
return supported_wince_platforms
def gather_msvc_detected_versions():
#Detected MSVC versions!
version_pattern = re.compile('^(\d\d?\.\d\d?)(Exp)?$')
detected_versions = []
for vcver,vcvar in [('VCExpress','Exp'), ('VisualStudio','')]:
try:
prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
except WindowsError:
try:
prefix = 'SOFTWARE\\Microsoft\\'+vcver
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
except WindowsError:
continue
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
match = version_pattern.match(version)
if not match:
continue
else:
versionnumber = float(match.group(1))
detected_versions.append((versionnumber, version+vcvar, prefix+"\\"+version))
def fun(tup):
return tup[0]
detected_versions.sort(key = fun)
return detected_versions
@conf
def gather_msvc_targets(conf, versions, version, vc_path):
#Looking for normal MSVC compilers!
targets = []
if os.path.isfile(os.path.join(vc_path, 'vcvarsall.bat')):
for target,realtarget in all_msvc_platforms[::-1]:
try:
targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, os.path.join(vc_path, 'vcvarsall.bat')))))
except conf.errors.ConfigurationError:
pass
elif os.path.isfile(os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')))))
except conf.errors.ConfigurationError:
pass
elif os.path.isfile(os.path.join(vc_path, 'Bin', 'vcvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, '', os.path.join(vc_path, 'Bin', 'vcvars32.bat')))))
except conf.errors.ConfigurationError:
pass
if targets:
versions.append(('msvc '+ version, targets))
@conf
def gather_wince_targets(conf, versions, version, vc_path, vsvars, supported_platforms):
#Looking for Win CE compilers!
for device,platforms in supported_platforms:
cetargets = []
for platform,compiler,include,lib in platforms:
winCEpath = os.path.join(vc_path, 'ce')
if not os.path.isdir(winCEpath):
continue
try:
common_bindirs,_1,_2 = conf.get_msvc_version('msvc', version, 'x86', vsvars)
except conf.errors.ConfigurationError:
continue
if os.path.isdir(os.path.join(winCEpath, 'lib', platform)):
bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] + common_bindirs
incdirs = [os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include'), include]
libdirs = [os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform), lib]
cetargets.append((platform, (platform, (bindirs,incdirs,libdirs))))
if cetargets:
versions.append((device + ' ' + version, cetargets))
@conf
def gather_winphone_targets(conf, versions, version, vc_path, vsvars):
#Looking for WinPhone compilers
targets = []
for target,realtarget in all_msvc_platforms[::-1]:
try:
targets.append((target, (realtarget, conf.get_msvc_version('winphone', version, target, vsvars))))
except conf.errors.ConfigurationError as e:
pass
if targets:
versions.append(('winphone '+ version, targets))
@conf
def gather_msvc_versions(conf, versions):
vc_paths = []
for (v,version,reg) in gather_msvc_detected_versions():
try:
try:
msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\VC")
except WindowsError:
msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\Microsoft Visual C++")
path,type = Utils.winreg.QueryValueEx(msvc_version, 'ProductDir')
vc_paths.append((version, os.path.abspath(str(path))))
except WindowsError:
continue
wince_supported_platforms = gather_wince_supported_platforms()
for version,vc_path in vc_paths:
vs_path = os.path.dirname(vc_path)
vsvars = os.path.join(vs_path, 'Common7', 'Tools', 'vsvars32.bat')
if wince_supported_platforms and os.path.isfile(vsvars):
conf.gather_wince_targets(versions, version, vc_path, vsvars, wince_supported_platforms)
vsvars = os.path.join(vs_path, 'VC', 'WPSDK', 'WP80', 'vcvarsphoneall.bat')
if os.path.isfile(vsvars):
conf.gather_winphone_targets(versions, '8.0', vc_path, vsvars)
for version,vc_path in vc_paths:
vs_path = os.path.dirname(vc_path)
conf.gather_msvc_targets(versions, version, vc_path)
@conf
def gather_icl_versions(conf, versions):
"""
Checks ICL compilers
:param versions: list to modify
:type versions: list
"""
version_pattern = re.compile('^...?.?\....?.?')
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++')
except WindowsError:
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++')
except WindowsError:
return
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
targets = []
for target,arch in all_icl_platforms:
try:
if target=='intel64': targetDir='EM64T_NATIVE'
else: targetDir=target
Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir)
icl_version=Utils.winreg.OpenKey(all_versions,version)
path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir')
batch_file=os.path.join(path,'bin','iclvars.bat')
if os.path.isfile(batch_file):
try:
targets.append((target,(arch,conf.get_msvc_version('intel',version,target,batch_file))))
except conf.errors.ConfigurationError:
pass
except WindowsError:
pass
for target,arch in all_icl_platforms:
try:
icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target)
path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir')
batch_file=os.path.join(path,'bin','iclvars.bat')
if os.path.isfile(batch_file):
try:
targets.append((target, (arch, conf.get_msvc_version('intel', version, target, batch_file))))
except conf.errors.ConfigurationError:
pass
except WindowsError:
continue
major = version[0:2]
versions.append(('intel ' + major, targets))
@conf
def gather_intel_composer_versions(conf, versions):
"""
Checks ICL compilers that are part of Intel Composer Suites
:param versions: list to modify
:type versions: list
"""
version_pattern = re.compile('^...?.?\...?.?.?')
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Suites')
except WindowsError:
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Suites')
except WindowsError:
return
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
targets = []
for target,arch in all_icl_platforms:
try:
if target=='intel64': targetDir='EM64T_NATIVE'
else: targetDir=target
try:
defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\'+targetDir)
except WindowsError:
if targetDir=='EM64T_NATIVE':
defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\EM64T')
else:
raise WindowsError
uid,type = Utils.winreg.QueryValueEx(defaults, 'SubKey')
Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++\\'+targetDir)
icl_version=Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++')
path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir')
batch_file=os.path.join(path,'bin','iclvars.bat')
if os.path.isfile(batch_file):
try:
targets.append((target,(arch,conf.get_msvc_version('intel',version,target,batch_file))))
except conf.errors.ConfigurationError as e:
pass
# The Intel compilervars_arch.bat is broken when used with Visual Studio Express 2012
# http://software.intel.com/en-us/forums/topic/328487
compilervars_warning_attr = '_compilervars_warning_key'
if version[0:2] == '13' and getattr(conf, compilervars_warning_attr, True):
setattr(conf, compilervars_warning_attr, False)
patch_url = 'http://software.intel.com/en-us/forums/topic/328487'
compilervars_arch = os.path.join(path, 'bin', 'compilervars_arch.bat')
for vscomntools in ['VS110COMNTOOLS', 'VS100COMNTOOLS']:
if vscomntools in os.environ:
vs_express_path = os.environ[vscomntools] + r'..\IDE\VSWinExpress.exe'
dev_env_path = os.environ[vscomntools] + r'..\IDE\devenv.exe'
if (r'if exist "%VS110COMNTOOLS%..\IDE\VSWinExpress.exe"' in Utils.readf(compilervars_arch) and
not os.path.exists(vs_express_path) and not os.path.exists(dev_env_path)):
Logs.warn(('The Intel compilervars_arch.bat only checks for one Visual Studio SKU '
'(VSWinExpress.exe) but it does not seem to be installed at %r. '
'The Intel command line setup will fail to configure unless the file %r '
'is patched. See: %s') % (vs_express_path, compilervars_arch, patch_url))
except WindowsError:
pass
major = version[0:2]
versions.append(('intel ' + major, targets))
@conf
def get_msvc_versions(conf):
"""
:return: list of compilers installed
:rtype: list of string
"""
if not conf.env['MSVC_INSTALLED_VERSIONS']:
lst = []
conf.gather_icl_versions(lst)
conf.gather_intel_composer_versions(lst)
conf.gather_wsdk_versions(lst)
conf.gather_msvc_versions(lst)
conf.env['MSVC_INSTALLED_VERSIONS'] = lst
return conf.env['MSVC_INSTALLED_VERSIONS']
@conf
def print_all_msvc_detected(conf):
"""
Print the contents of *conf.env.MSVC_INSTALLED_VERSIONS*
"""
for version,targets in conf.env['MSVC_INSTALLED_VERSIONS']:
Logs.info(version)
for target,l in targets:
Logs.info("\t"+target)
@conf
def detect_msvc(conf, arch = False):
versions = get_msvc_versions(conf)
return setup_msvc(conf, versions, arch)
@conf
def find_lt_names_msvc(self, libname, is_static=False):
"""
Win32/MSVC specific code to glean out information from libtool la files.
This function is not attached to the task_gen class.
"""
lt_names=[
'lib%s.la' % libname,
'%s.la' % libname,
]
for path in self.env['LIBPATH']:
for la in lt_names:
laf=os.path.join(path,la)
dll=None
if os.path.exists(laf):
ltdict = Utils.read_la_file(laf)
lt_libdir=None
if ltdict.get('libdir', ''):
lt_libdir = ltdict['libdir']
if not is_static and ltdict.get('library_names', ''):
dllnames=ltdict['library_names'].split()
dll=dllnames[0].lower()
dll=re.sub('\.dll$', '', dll)
return (lt_libdir, dll, False)
elif ltdict.get('old_library', ''):
olib=ltdict['old_library']
if os.path.exists(os.path.join(path,olib)):
return (path, olib, True)
elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)):
return (lt_libdir, olib, True)
else:
return (None, olib, True)
else:
raise self.errors.WafError('invalid libtool object file: %s' % laf)
return (None, None, None)
@conf
def libname_msvc(self, libname, is_static=False):
lib = libname.lower()
lib = re.sub('\.lib$','',lib)
if lib in g_msvc_systemlibs:
return lib
lib=re.sub('^lib','',lib)
if lib == 'm':
return None
(lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static)
if lt_path != None and lt_libname != None:
if lt_static == True:
# file existence check has been made by find_lt_names
return os.path.join(lt_path,lt_libname)
if lt_path != None:
_libpaths=[lt_path] + self.env['LIBPATH']
else:
_libpaths=self.env['LIBPATH']
static_libs=[
'lib%ss.lib' % lib,
'lib%s.lib' % lib,
'%ss.lib' % lib,
'%s.lib' %lib,
]
dynamic_libs=[
'lib%s.dll.lib' % lib,
'lib%s.dll.a' % lib,
'%s.dll.lib' % lib,
'%s.dll.a' % lib,
'lib%s_d.lib' % lib,
'%s_d.lib' % lib,
'%s.lib' %lib,
]
libnames=static_libs
if not is_static:
libnames=dynamic_libs + static_libs
for path in _libpaths:
for libn in libnames:
if os.path.exists(os.path.join(path, libn)):
debug('msvc: lib found: %s' % os.path.join(path,libn))
return re.sub('\.lib$', '',libn)
#if no lib can be found, just return the libname as msvc expects it
self.fatal("The library %r could not be found" % libname)
return re.sub('\.lib$', '', libname)
@conf
def check_lib_msvc(self, libname, is_static=False, uselib_store=None):
"""
Ideally we should be able to place the lib in the right env var, either STLIB or LIB,
but we don't distinguish static libs from shared libs.
This is ok since msvc doesn't have any special linker flag to select static libs (no env['STLIB_MARKER'])
"""
libn = self.libname_msvc(libname, is_static)
if not uselib_store:
uselib_store = libname.upper()
if False and is_static: # disabled
self.env['STLIB_' + uselib_store] = [libn]
else:
self.env['LIB_' + uselib_store] = [libn]
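# Illustrative note (added, not in the original module): for a system import library such as
# gdi32, conf.check_lib_msvc('gdi32') resolves the name via libname_msvc() straight from
# g_msvc_systemlibs and ends up setting conf.env['LIB_GDI32'] = ['gdi32'], which targets can
# then pick up with use='GDI32' (see the module docstring example).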
@conf
def check_libs_msvc(self, libnames, is_static=False):
for libname in Utils.to_list(libnames):
self.check_lib_msvc(libname, is_static)
def configure(conf):
"""
Configuration methods to call for detecting msvc
"""
conf.autodetect(True)
conf.find_msvc()
conf.msvc_common_flags()
conf.cc_load_tools()
conf.cxx_load_tools()
conf.cc_add_flags()
conf.cxx_add_flags()
conf.link_add_flags()
conf.visual_studio_add_flags()
@conf
def no_autodetect(conf):
conf.env.NO_MSVC_DETECT = 1
configure(conf)
@conf
def autodetect(conf, arch = False):
v = conf.env
if v.NO_MSVC_DETECT:
return
if arch:
compiler, version, path, includes, libdirs, arch = conf.detect_msvc(True)
v['DEST_CPU'] = arch
else:
compiler, version, path, includes, libdirs = conf.detect_msvc()
v['PATH'] = path
v['INCLUDES'] = includes
v['LIBPATH'] = libdirs
v['MSVC_COMPILER'] = compiler
try:
v['MSVC_VERSION'] = float(version)
except Exception:
v['MSVC_VERSION'] = float(version[:-3])
def _get_prog_names(conf, compiler):
if compiler=='intel':
compiler_name = 'ICL'
linker_name = 'XILINK'
lib_name = 'XILIB'
else:
# assumes CL.exe
compiler_name = 'CL'
linker_name = 'LINK'
lib_name = 'LIB'
return compiler_name, linker_name, lib_name
@conf
def find_msvc(conf):
"""Due to path format limitations, limit operation only to native Win32. Yeah it sucks."""
if sys.platform == 'cygwin':
conf.fatal('MSVC module does not work under cygwin Python!')
# the autodetection is supposed to be performed before entering in this method
v = conf.env
path = v['PATH']
compiler = v['MSVC_COMPILER']
version = v['MSVC_VERSION']
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
v.MSVC_MANIFEST = (compiler == 'msvc' and version >= 8) or (compiler == 'wsdk' and version >= 6) or (compiler == 'intel' and version >= 11)
# compiler
cxx = None
if v['CXX']: cxx = v['CXX']
elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
cxx = conf.find_program(compiler_name, var='CXX', path_list=path)
cxx = conf.cmd_to_list(cxx)
# before setting anything, check if the compiler is really msvc
env = dict(conf.environ)
if path: env.update(PATH = ';'.join(path))
if not conf.cmd_and_log(cxx + ['/nologo', '/help'], env=env):
conf.fatal('the msvc compiler could not be identified')
# c/c++ compiler
v['CC'] = v['CXX'] = cxx
v['CC_NAME'] = v['CXX_NAME'] = 'msvc'
# linker
if not v['LINK_CXX']:
link = conf.find_program(linker_name, path_list=path)
if link: v['LINK_CXX'] = link
else: conf.fatal('%s was not found (linker)' % linker_name)
v['LINK'] = link
if not v['LINK_CC']:
v['LINK_CC'] = v['LINK_CXX']
# staticlib linker
if not v['AR']:
stliblink = conf.find_program(lib_name, path_list=path, var='AR')
if not stliblink: return
v['ARFLAGS'] = ['/NOLOGO']
# manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later
if v.MSVC_MANIFEST:
conf.find_program('MT', path_list=path, var='MT')
v['MTFLAGS'] = ['/NOLOGO']
try:
conf.load('winres')
except Errors.WafError:
warn('Resource compiler not found. Compiling resource file is disabled')
@conf
def visual_studio_add_flags(self):
"""visual studio flags found in the system environment"""
v = self.env
try: v.prepend_value('INCLUDES', [x for x in self.environ['INCLUDE'].split(';') if x]) # notice the 'S'
except Exception: pass
try: v.prepend_value('LIBPATH', [x for x in self.environ['LIB'].split(';') if x])
except Exception: pass
@conf
def msvc_common_flags(conf):
"""
Setup the flags required for executing the msvc compiler
"""
v = conf.env
v['DEST_BINFMT'] = 'pe'
v.append_value('CFLAGS', ['/nologo'])
v.append_value('CXXFLAGS', ['/nologo'])
v['DEFINES_ST'] = '/D%s'
v['CC_SRC_F'] = ''
v['CC_TGT_F'] = ['/c', '/Fo']
if v['MSVC_VERSION'] >= 8:
v['CC_TGT_F']= ['/FC'] + v['CC_TGT_F']
v['CXX_SRC_F'] = ''
v['CXX_TGT_F'] = ['/c', '/Fo']
if v['MSVC_VERSION'] >= 8:
v['CXX_TGT_F']= ['/FC'] + v['CXX_TGT_F']
v['CPPPATH_ST'] = '/I%s' # template for adding include paths
v['AR_TGT_F'] = v['CCLNK_TGT_F'] = v['CXXLNK_TGT_F'] = '/OUT:'
# Subsystem specific flags
v['CFLAGS_CONSOLE'] = v['CXXFLAGS_CONSOLE'] = ['/SUBSYSTEM:CONSOLE']
v['CFLAGS_NATIVE'] = v['CXXFLAGS_NATIVE'] = ['/SUBSYSTEM:NATIVE']
v['CFLAGS_POSIX'] = v['CXXFLAGS_POSIX'] = ['/SUBSYSTEM:POSIX']
v['CFLAGS_WINDOWS'] = v['CXXFLAGS_WINDOWS'] = ['/SUBSYSTEM:WINDOWS']
v['CFLAGS_WINDOWSCE'] = v['CXXFLAGS_WINDOWSCE'] = ['/SUBSYSTEM:WINDOWSCE']
# CRT specific flags
v['CFLAGS_CRT_MULTITHREADED'] = v['CXXFLAGS_CRT_MULTITHREADED'] = ['/MT']
v['CFLAGS_CRT_MULTITHREADED_DLL'] = v['CXXFLAGS_CRT_MULTITHREADED_DLL'] = ['/MD']
v['CFLAGS_CRT_MULTITHREADED_DBG'] = v['CXXFLAGS_CRT_MULTITHREADED_DBG'] = ['/MTd']
v['CFLAGS_CRT_MULTITHREADED_DLL_DBG'] = v['CXXFLAGS_CRT_MULTITHREADED_DLL_DBG'] = ['/MDd']
# linker
v['LIB_ST'] = '%s.lib' # template for adding shared libs
v['LIBPATH_ST'] = '/LIBPATH:%s' # template for adding libpaths
v['STLIB_ST'] = '%s.lib'
v['STLIBPATH_ST'] = '/LIBPATH:%s'
v.append_value('LINKFLAGS', ['/NOLOGO'])
if v['MSVC_MANIFEST']:
v.append_value('LINKFLAGS', ['/MANIFEST'])
# shared library
v['CFLAGS_cshlib'] = []
v['CXXFLAGS_cxxshlib'] = []
v['LINKFLAGS_cshlib'] = v['LINKFLAGS_cxxshlib'] = ['/DLL']
v['cshlib_PATTERN'] = v['cxxshlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = '%s.lib'
v['IMPLIB_ST'] = '/IMPLIB:%s'
# static library
v['LINKFLAGS_cstlib'] = []
v['cstlib_PATTERN'] = v['cxxstlib_PATTERN'] = '%s.lib'
# program
v['cprogram_PATTERN'] = v['cxxprogram_PATTERN'] = '%s.exe'
#######################################################################################################
##### conf above, build below
@after_method('apply_link')
@feature('c', 'cxx')
def apply_flags_msvc(self):
"""
Add additional flags implied by msvc, such as subsystems and pdb files::
def build(bld):
bld.stlib(source='main.c', target='bar', subsystem='gruik')
"""
if self.env.CC_NAME != 'msvc' or not getattr(self, 'link_task', None):
return
is_static = isinstance(self.link_task, ccroot.stlink_task)
subsystem = getattr(self, 'subsystem', '')
if subsystem:
subsystem = '/subsystem:%s' % subsystem
flags = is_static and 'ARFLAGS' or 'LINKFLAGS'
self.env.append_value(flags, subsystem)
if not is_static:
for f in self.env.LINKFLAGS:
d = f.lower()
if d[1:] == 'debug':
pdbnode = self.link_task.outputs[0].change_ext('.pdb')
self.link_task.outputs.append(pdbnode)
try:
self.install_task.source.append(pdbnode)
except AttributeError:
pass
break
# split the manifest file processing from the link task, like for the rc processing
@feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib')
@after_method('apply_link')
def apply_manifest(self):
"""
Special linker for MSVC with support for embedding manifests into DLLs
and executables compiled by Visual Studio 2005 and later. Without
the manifest file, the binaries are unusable.
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx
"""
if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST and getattr(self, 'link_task', None):
out_node = self.link_task.outputs[0]
man_node = out_node.parent.find_or_declare(out_node.name + '.manifest')
self.link_task.outputs.append(man_node)
self.link_task.do_manifest = True
def exec_mf(self):
"""
Create the manifest file
"""
env = self.env
mtool = env['MT']
if not mtool:
return 0
self.do_manifest = False
outfile = self.outputs[0].abspath()
manifest = None
for out_node in self.outputs:
if out_node.name.endswith('.manifest'):
manifest = out_node.abspath()
break
if manifest is None:
# Should never get here. If we do, it means the manifest file was
# never added to the outputs list, thus we don't have a manifest file
# to embed, so we just return.
return 0
# embedding mode. Different for EXE's and DLL's.
# see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
mode = ''
if 'cprogram' in self.generator.features or 'cxxprogram' in self.generator.features:
mode = '1'
elif 'cshlib' in self.generator.features or 'cxxshlib' in self.generator.features:
mode = '2'
debug('msvc: embedding manifest in mode %r' % mode)
lst = []
lst.append(env['MT'])
lst.extend(Utils.to_list(env['MTFLAGS']))
lst.extend(['-manifest', manifest])
lst.append('-outputresource:%s;%s' % (outfile, mode))
lst = [lst]
return self.exec_command(*lst)
def quote_response_command(self, flag):
if flag.find(' ') > -1:
for x in ('/LIBPATH:', '/IMPLIB:', '/OUT:', '/I'):
if flag.startswith(x):
flag = '%s"%s"' % (x, flag[len(x):])
break
else:
flag = '"%s"' % flag
return flag
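# Illustrative sketch (not part of waf): what quote_response_command() does to
# flags containing spaces before they are written to a response file. The sample
# paths are hypothetical; passing None for `self` works because the method does
# not use it.
def _demo_quote_response_command():
    qrc = quote_response_command
    # prefix-aware options keep the option name outside the quotes
    assert qrc(None, r'/LIBPATH:C:\Program Files\libs') == r'/LIBPATH:"C:\Program Files\libs"'
    # anything else containing a space is quoted whole
    assert qrc(None, r'C:\Program Files\obj\main.obj') == r'"C:\Program Files\obj\main.obj"'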
def exec_response_command(self, cmd, **kw):
# not public yet
try:
tmp = None
if sys.platform.startswith('win') and isinstance(cmd, list) and len(' '.join(cmd)) >= 8192:
program = cmd[0] #unquoted program name, otherwise exec_command will fail
cmd = [self.quote_response_command(x) for x in cmd]
(fd, tmp) = tempfile.mkstemp()
os.write(fd, '\r\n'.join(i.replace('\\', '\\\\') for i in cmd[1:]).encode())
os.close(fd)
cmd = [program, '@' + tmp]
# no return here, that's on purpose
ret = self.generator.bld.exec_command(cmd, **kw)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass # anti-virus and indexers can keep the files open -_-
return ret
########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token
def exec_command_msvc(self, *k, **kw):
"""
Change the command-line execution for msvc programs.
Instead of quoting all the paths and keeping the shell, we simply join the options msvc is interested in
"""
if isinstance(k[0], list):
lst = []
carry = ''
for a in k[0]:
if a == '/Fo' or a == '/doc' or a[-1] == ':':
carry = a
else:
lst.append(carry + a)
carry = ''
k = [lst]
if self.env['PATH']:
env = dict(self.env.env or os.environ)
env.update(PATH = ';'.join(self.env['PATH']))
kw['env'] = env
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
ret = self.exec_response_command(k[0], **kw)
if not ret and getattr(self, 'do_manifest', None):
ret = self.exec_mf()
return ret
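# Illustrative sketch (not part of waf): the "carry" loop above glues /Fo, /doc and
# any token ending in ':' onto the argument that follows, which is the form cl.exe
# and link.exe expect. Hypothetical before/after:
#
#     ['cl.exe', '/Fo', 'main.obj', '/c', 'main.c']
#         -> ['cl.exe', '/Fomain.obj', '/c', 'main.c']
def _demo_msvc_carry(args):
    lst = []
    carry = ''
    for a in args:
        if a == '/Fo' or a == '/doc' or a[-1] == ':':
            carry = a
        else:
            lst.append(carry + a)
            carry = ''
    return lst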
def wrap_class(class_name):
"""
Manifest file processing and @response file workaround for command-line length limits on Windows systems
The indicated task class is replaced by a subclass to prevent conflicts in case the class is wrapped more than once
"""
cls = Task.classes.get(class_name, None)
if not cls:
return None
derived_class = type(class_name, (cls,), {})
def exec_command(self, *k, **kw):
if self.env['CC_NAME'] == 'msvc':
return self.exec_command_msvc(*k, **kw)
else:
return super(derived_class, self).exec_command(*k, **kw)
# Chain-up monkeypatch needed since exec_command() is in base class API
derived_class.exec_command = exec_command
# No chain-up behavior needed since the following methods aren't in
# base class API
derived_class.exec_response_command = exec_response_command
derived_class.quote_response_command = quote_response_command
derived_class.exec_command_msvc = exec_command_msvc
derived_class.exec_mf = exec_mf
return derived_class
for k in 'c cxx cprogram cxxprogram cshlib cxxshlib cstlib cxxstlib'.split():
wrap_class(k)
def make_winapp(self, family):
append = self.env.append_unique
append('DEFINES', 'WINAPI_FAMILY=%s' % family)
append('CXXFLAGS', '/ZW')
append('CXXFLAGS', '/TP')
for lib_path in self.env.LIBPATH:
append('CXXFLAGS','/AI%s'%lib_path)
@feature('winphoneapp')
@after_method('process_use')
@after_method('propagate_uselib_vars')
def make_winphone_app(self):
make_winapp(self, 'WINAPI_FAMILY_PHONE_APP')
self.env.append_unique('LINKFLAGS', '/NODEFAULTLIB:ole32.lib')
self.env.append_unique('LINKFLAGS', 'PhoneAppModelHost.lib')
@feature('winapp')
@after_method('process_use')
@after_method('propagate_uselib_vars')
def make_windows_app(self):
make_winapp(self, 'WINAPI_FAMILY_DESKTOP_APP')
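# Illustrative wscript fragment (not part of this module): how a user build script
# might request the 'winapp' feature defined above; target and source names are
# hypothetical.
#
#     def build(bld):
#         bld.program(source='main.cpp', target='store_app', features='winapp')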
|
|
import pytest
from nbformat.v4 import new_notebook, new_output
from ...preprocessors import SaveCells, SaveAutoGrades
from ...api import Gradebook
from ...utils import compute_checksum
from .base import BaseTestPreprocessor
from .. import (
create_grade_cell, create_grade_and_solution_cell, create_solution_cell)
@pytest.fixture
def preprocessors():
return (SaveCells(), SaveAutoGrades())
@pytest.fixture
def gradebook(request, db):
gb = Gradebook(db)
gb.add_assignment("ps0")
gb.add_student("bar")
def fin():
gb.close()
request.addfinalizer(fin)
return gb
@pytest.fixture
def resources(db):
return {
"nbgrader": {
"db_url": db,
"assignment": "ps0",
"notebook": "test",
"student": "bar"
}
}
class TestSaveAutoGrades(BaseTestPreprocessor):
def test_grade_correct_code(self, preprocessors, gradebook, resources):
"""Is a passing code cell correctly graded?"""
cell = create_grade_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 1
assert grade_cell.max_score == 1
assert grade_cell.auto_score == 1
assert grade_cell.manual_score is None
assert not grade_cell.needs_manual_grade
def test_grade_incorrect_code(self, preprocessors, gradebook, resources):
"""Is a failing code cell correctly graded?"""
cell = create_grade_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])]
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score == 0
assert grade_cell.manual_score is None
assert not grade_cell.needs_manual_grade
def test_grade_unchanged_markdown(self, preprocessors, gradebook, resources):
"""Is an unchanged markdown cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score == 0
assert grade_cell.manual_score is None
assert not grade_cell.needs_manual_grade
def test_grade_changed_markdown(self, preprocessors, gradebook, resources):
"""Is a changed markdown cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score is None
assert grade_cell.manual_score is None
assert grade_cell.needs_manual_grade
def test_comment_unchanged_code(self, preprocessors, gradebook, resources):
"""Is an unchanged code cell given the correct comment?"""
cell = create_solution_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment == "No response."
def test_comment_changed_code(self, preprocessors, gradebook, resources):
"""Is a changed code cell given the correct comment?"""
cell = create_solution_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment is None
def test_comment_unchanged_markdown(self, preprocessors, gradebook, resources):
"""Is an unchanged markdown cell given the correct comment?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment == "No response."
def test_comment_changed_markdown(self, preprocessors, gradebook, resources):
"""Is a changed markdown cell given the correct comment?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment is None
def test_grade_existing_manual_grade(self, preprocessors, gradebook, resources):
"""Is a failing code cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score is None
assert grade_cell.manual_score is None
assert grade_cell.needs_manual_grade
grade_cell.manual_score = 1
grade_cell.needs_manual_grade = False
gradebook.db.commit()
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 1
assert grade_cell.max_score == 1
assert grade_cell.auto_score is None
assert grade_cell.manual_score == 1
assert grade_cell.needs_manual_grade
def test_grade_existing_auto_comment(self, preprocessors, gradebook, resources):
"""Is a failing code cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment == "No response."
nb.cells[-1].source = 'goodbye'
preprocessors[1].preprocess(nb, resources)
gradebook.db.refresh(comment)
assert comment.auto_comment is None
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
import uuid
class UsersNegativeTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(UsersNegativeTestJSON, cls).setUpClass()
cls.alt_user = rand_name('test_user_')
cls.alt_password = rand_name('pass_')
cls.alt_email = cls.alt_user + '@testmail.tm'
cls.alt_tenant = rand_name('test_tenant_')
cls.alt_description = rand_name('desc_')
@attr(type=['negative', 'gate'])
def test_create_user_by_unauthorized_user(self):
# Non-administrator should not be authorized to create a user
self.data.setup_test_tenant()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_user, self.alt_user,
self.alt_password, self.data.tenant['id'],
self.alt_email)
@attr(type=['negative', 'gate'])
def test_create_user_with_empty_name(self):
# User with an empty name should not be created
self.data.setup_test_tenant()
self.assertRaises(exceptions.BadRequest, self.client.create_user, '',
self.alt_password, self.data.tenant['id'],
self.alt_email)
@attr(type=['negative', 'gate'])
def test_create_user_with_name_length_over_255(self):
# Length of the user name field should be restricted to 255 characters
self.data.setup_test_tenant()
self.assertRaises(exceptions.BadRequest, self.client.create_user,
'a' * 256, self.alt_password,
self.data.tenant['id'], self.alt_email)
@attr(type=['negative', 'gate'])
def test_create_user_with_duplicate_name(self):
# Duplicate user should not be created
self.data.setup_test_user()
self.assertRaises(exceptions.Duplicate, self.client.create_user,
self.data.test_user, self.data.test_password,
self.data.tenant['id'], self.data.test_email)
@attr(type=['negative', 'gate'])
def test_create_user_for_non_existant_tenant(self):
# Attempt to create a user in a non-existent tenant should fail
self.assertRaises(exceptions.NotFound, self.client.create_user,
self.alt_user, self.alt_password, '49ffgg99999',
self.alt_email)
@attr(type=['negative', 'gate'])
def test_create_user_request_without_a_token(self):
# Request to create a user without a valid token should fail
self.data.setup_test_tenant()
# Get the token of the current client
token = self.client.get_auth()
# Delete the token from database
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.create_user,
self.alt_user, self.alt_password,
self.data.tenant['id'], self.alt_email)
# Unset the token to allow further tests to generate a new token
self.client.clear_auth()
@attr(type=['negative', 'gate'])
def test_create_user_with_enabled_non_bool(self):
# Attempt to create a user with a non-boolean 'enabled' value should fail
self.data.setup_test_tenant()
name = rand_name('test_user_')
self.assertRaises(exceptions.BadRequest, self.client.create_user,
name, self.alt_password,
self.data.tenant['id'],
self.alt_email, enabled=3)
@attr(type=['negative', 'gate'])
def test_update_user_for_non_existant_user(self):
# Attempt to update a non-existent user should fail
user_name = rand_name('user-')
non_existent_id = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound, self.client.update_user,
non_existent_id, name=user_name)
@attr(type=['negative', 'gate'])
def test_update_user_request_without_a_token(self):
# Request to update a user without a valid token should fail
# Get the token of the current client
token = self.client.get_auth()
# Delete the token from database
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.update_user,
self.alt_user)
# Unset the token to allow further tests to generate a new token
self.client.clear_auth()
@attr(type=['negative', 'gate'])
def test_update_user_by_unauthorized_user(self):
# Non-administrator should not be authorized to update user
self.data.setup_test_tenant()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.update_user, self.alt_user)
@attr(type=['negative', 'gate'])
def test_delete_users_by_unauthorized_user(self):
# Non-administrator user should not be authorized to delete a user
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_user,
self.data.user['id'])
@attr(type=['negative', 'gate'])
def test_delete_non_existant_user(self):
# Attempt to delete a non-existent user should fail
self.assertRaises(exceptions.NotFound, self.client.delete_user,
'junk12345123')
@attr(type=['negative', 'gate'])
def test_delete_user_request_without_a_token(self):
# Request to delete a user without a valid token should fail
# Get the token of the current client
token = self.client.get_auth()
# Delete the token from database
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.delete_user,
self.alt_user)
# Unset the token to allow further tests to generate a new token
self.client.clear_auth()
@attr(type=['negative', 'gate'])
def test_authentication_for_disabled_user(self):
# Disabled user's token should not get authenticated
self.data.setup_test_user()
self.disable_user(self.data.test_user)
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
self.data.test_user,
self.data.test_password,
self.data.test_tenant)
@attr(type=['negative', 'gate'])
def test_authentication_when_tenant_is_disabled(self):
# User's token for a disabled tenant should not be authenticated
self.data.setup_test_user()
self.disable_tenant(self.data.test_tenant)
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
self.data.test_user,
self.data.test_password,
self.data.test_tenant)
@attr(type=['negative', 'gate'])
def test_authentication_with_invalid_tenant(self):
# User's token for an invalid tenant should not be authenticated
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
self.data.test_user,
self.data.test_password,
'junktenant1234')
@attr(type=['negative', 'gate'])
def test_authentication_with_invalid_username(self):
# Non-existent user's token should not get authenticated
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
'junkuser123', self.data.test_password,
self.data.test_tenant)
@attr(type=['negative', 'gate'])
def test_authentication_with_invalid_password(self):
# User's token with invalid password should not be authenticated
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
self.data.test_user, 'junkpass1234',
self.data.test_tenant)
@attr(type=['negative', 'gate'])
def test_get_users_by_unauthorized_user(self):
# Non-administrator user should not be authorized to get user list
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.get_users)
@attr(type=['negative', 'gate'])
def test_get_users_request_without_token(self):
# Request to get list of users without a valid token should fail
token = self.client.get_auth()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.get_users)
self.client.clear_auth()
@attr(type=['negative', 'gate'])
def test_list_users_with_invalid_tenant(self):
# Should not be able to return a list of all
# users for a non-existent tenant
# Assign invalid tenant ids
invalid_id = list()
invalid_id.append(rand_name('999'))
invalid_id.append('alpha')
invalid_id.append(rand_name("dddd@#%%^$"))
invalid_id.append('!@#()$%^&*?<>{}[]')
# List the users with invalid tenant id
for invalid in invalid_id:
self.assertRaises(exceptions.NotFound,
self.client.list_users_for_tenant, invalid)
class UsersNegativeTestXML(UsersNegativeTestJSON):
_interface = 'xml'
|
|
"""HTTP Client for asyncio."""
import asyncio
import base64
import hashlib
import os
import sys
import traceback
import warnings
import http.cookies
import urllib.parse
from multidict import MultiDictProxy, MultiDict, CIMultiDict, upstr
import aiohttp
from .client_reqrep import ClientRequest, ClientResponse
from .errors import WSServerHandshakeError
from .websocket import WS_KEY, WebSocketParser, WebSocketWriter
from .websocket_client import ClientWebSocketResponse
from . import hdrs, helpers
__all__ = ('ClientSession', 'request', 'get', 'options', 'head',
'delete', 'post', 'put', 'patch', 'ws_connect')
PY_35 = sys.version_info >= (3, 5)
class ClientSession:
"""First-class interface for making HTTP requests."""
_source_traceback = None
_connector = None
def __init__(self, *, connector=None, loop=None, cookies=None,
headers=None, skip_auto_headers=None,
auth=None, request_class=ClientRequest,
response_class=ClientResponse,
ws_response_class=ClientWebSocketResponse,
version=aiohttp.HttpVersion11):
if connector is None:
connector = aiohttp.TCPConnector(loop=loop)
loop = connector._loop # never None
else:
if loop is None:
loop = connector._loop # never None
elif connector._loop is not loop:
raise ValueError("loop argument must agree with connector")
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self._cookies = http.cookies.SimpleCookie()
# For backward compatibility with `share_cookies` connectors
if connector._share_cookies:
self._update_cookies(connector.cookies)
if cookies is not None:
self._update_cookies(cookies)
self._connector = connector
self._default_auth = auth
self._version = version
# Convert to list of tuples
if headers:
headers = CIMultiDict(headers)
else:
headers = CIMultiDict()
self._default_headers = headers
if skip_auto_headers is not None:
self._skip_auto_headers = frozenset([upstr(i)
for i in skip_auto_headers])
else:
self._skip_auto_headers = frozenset()
self._request_class = request_class
self._response_class = response_class
self._ws_response_class = ws_response_class
def __del__(self, _warnings=warnings):
if not self.closed:
self.close()
_warnings.warn("Unclosed client session {!r}".format(self),
ResourceWarning)
context = {'client_session': self,
'message': 'Unclosed client session'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True):
"""Perform HTTP request."""
return _RequestContextManager(
self._request(
method,
url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof))
@asyncio.coroutine
def _request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True):
if version is not None:
warnings.warn("HTTP version should be specified "
"by ClientSession constructor", DeprecationWarning)
else:
version = self._version
if self.closed:
raise RuntimeError('Session is closed')
redirects = 0
history = []
if not isinstance(method, upstr):
method = upstr(method)
# Merge with default headers and transform to CIMultiDict
headers = self._prepare_headers(headers)
if auth is None:
auth = self._default_auth
# It would be confusing to support both an explicit Authorization header
# and the `auth` argument
if (headers is not None and
auth is not None and
hdrs.AUTHORIZATION in headers):
raise ValueError("Can't combine `Authorization` header with "
"`auth` argument")
skip_headers = set(self._skip_auto_headers)
if skip_auto_headers is not None:
for i in skip_auto_headers:
skip_headers.add(upstr(i))
while True:
req = self._request_class(
method, url, params=params, headers=headers,
skip_auto_headers=skip_headers, data=data,
cookies=self.cookies, encoding=encoding,
auth=auth, version=version, compress=compress, chunked=chunked,
expect100=expect100,
loop=self._loop, response_class=self._response_class)
conn = yield from self._connector.connect(req)
try:
resp = req.send(conn.writer, conn.reader)
try:
yield from resp.start(conn, read_until_eof)
except:
resp.close()
conn.close()
raise
except (aiohttp.HttpProcessingError,
aiohttp.ServerDisconnectedError) as exc:
raise aiohttp.ClientResponseError() from exc
except OSError as exc:
raise aiohttp.ClientOSError(*exc.args) from exc
self._update_cookies(resp.cookies)
# For backward compatibility with `share_cookies` connectors
if self._connector._share_cookies:
self._connector.update_cookies(resp.cookies)
# redirects
if resp.status in (301, 302, 303, 307) and allow_redirects:
redirects += 1
history.append(resp)
if max_redirects and redirects >= max_redirects:
resp.close()
break
else:
# TODO: close the connection if BODY is large enough
# Redirect with big BODY is forbidden by HTTP protocol
# but malformed server may send illegal response.
# Small BODIES with text like "Not Found" are still
# perfectly fine and should be accepted.
yield from resp.release()
# For 301 and 302, mimic IE behaviour, now changed in RFC.
# Details: https://github.com/kennethreitz/requests/pull/269
if resp.status != 307:
method = hdrs.METH_GET
data = None
if headers.get(hdrs.CONTENT_LENGTH):
headers.pop(hdrs.CONTENT_LENGTH)
r_url = (resp.headers.get(hdrs.LOCATION) or
resp.headers.get(hdrs.URI))
scheme = urllib.parse.urlsplit(r_url)[0]
if scheme not in ('http', 'https', ''):
resp.close()
raise ValueError('Can redirect only to http or https')
elif not scheme:
r_url = urllib.parse.urljoin(url, r_url)
url = r_url
params = None
yield from resp.release()
continue
break
resp._history = tuple(history)
return resp
def ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None,
headers=None):
"""Initiate websocket connection."""
return _WSRequestContextManager(
self._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
auth=auth,
origin=origin,
headers=headers))
@asyncio.coroutine
def _ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None,
headers=None):
sec_key = base64.b64encode(os.urandom(16))
if headers is None:
headers = CIMultiDict()
default_headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_VERSION: '13',
hdrs.SEC_WEBSOCKET_KEY: sec_key.decode(),
}
for key, value in default_headers.items():
if key not in headers:
headers[key] = value
if protocols:
headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ','.join(protocols)
if origin is not None:
headers[hdrs.ORIGIN] = origin
# send request
resp = yield from self.get(url, headers=headers,
read_until_eof=False,
auth=auth)
try:
# check handshake
if resp.status != 101:
raise WSServerHandshakeError(
message='Invalid response status',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.UPGRADE, '').lower() != 'websocket':
raise WSServerHandshakeError(
message='Invalid upgrade header',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.CONNECTION, '').lower() != 'upgrade':
raise WSServerHandshakeError(
message='Invalid connection header',
code=resp.status,
headers=resp.headers)
# key calculation
key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, '')
match = base64.b64encode(
hashlib.sha1(sec_key + WS_KEY).digest()).decode()
if key != match:
raise WSServerHandshakeError(
message='Invalid challenge response',
code=resp.status,
headers=resp.headers)
# websocket protocol
protocol = None
if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:
resp_protocols = [
proto.strip() for proto in
resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]
for proto in resp_protocols:
if proto in protocols:
protocol = proto
break
reader = resp.connection.reader.set_parser(WebSocketParser)
resp.connection.writer.set_tcp_nodelay(True)
writer = WebSocketWriter(resp.connection.writer, use_mask=True)
except Exception:
resp.close()
raise
else:
return self._ws_response_class(reader,
writer,
protocol,
resp,
timeout,
autoclose,
autoping,
self._loop)
def _update_cookies(self, cookies):
"""Update shared cookies."""
if isinstance(cookies, dict):
cookies = cookies.items()
for name, value in cookies:
if isinstance(value, http.cookies.Morsel):
# use dict method because SimpleCookie class modifies value
# before Python 3.4
dict.__setitem__(self.cookies, name, value)
else:
self.cookies[name] = value
def _prepare_headers(self, headers):
""" Add default headers and transform it to CIMultiDict
"""
# Convert headers to MultiDict
result = CIMultiDict(self._default_headers)
if headers:
if not isinstance(headers, (MultiDictProxy, MultiDict)):
headers = CIMultiDict(headers)
added_names = set()
for key, value in headers.items():
if key in added_names:
result.add(key, value)
else:
result[key] = value
added_names.add(key)
return result
def get(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP GET request."""
return _RequestContextManager(
self._request(hdrs.METH_GET, url,
allow_redirects=allow_redirects,
**kwargs))
def options(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP OPTIONS request."""
return _RequestContextManager(
self._request(hdrs.METH_OPTIONS, url,
allow_redirects=allow_redirects,
**kwargs))
def head(self, url, *, allow_redirects=False, **kwargs):
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(hdrs.METH_HEAD, url,
allow_redirects=allow_redirects,
**kwargs))
def post(self, url, *, data=None, **kwargs):
"""Perform HTTP POST request."""
return _RequestContextManager(
self._request(hdrs.METH_POST, url,
data=data,
**kwargs))
def put(self, url, *, data=None, **kwargs):
"""Perform HTTP PUT request."""
return _RequestContextManager(
self._request(hdrs.METH_PUT, url,
data=data,
**kwargs))
def patch(self, url, *, data=None, **kwargs):
"""Perform HTTP PATCH request."""
return _RequestContextManager(
self._request(hdrs.METH_PATCH, url,
data=data,
**kwargs))
def delete(self, url, **kwargs):
"""Perform HTTP DELETE request."""
return _RequestContextManager(
self._request(hdrs.METH_DELETE, url,
**kwargs))
def close(self):
"""Close underlying connector.
Release all acquired resources.
"""
if not self.closed:
self._connector.close()
self._connector = None
ret = helpers.create_future(self._loop)
ret.set_result(None)
return ret
@property
def closed(self):
"""Is client session closed.
A readonly property.
"""
return self._connector is None or self._connector.closed
@property
def connector(self):
"""Connector instance used for the session."""
return self._connector
@property
def cookies(self):
"""The session cookies."""
return self._cookies
@property
def version(self):
"""The session HTTP protocol version."""
return self._version
def detach(self):
"""Detach connector from session without closing the former.
Session is switched to closed state anyway.
"""
self._connector = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if PY_35:
@asyncio.coroutine
def __aenter__(self):
return self
@asyncio.coroutine
def __aexit__(self, exc_type, exc_val, exc_tb):
yield from self.close()
if PY_35:
from collections.abc import Coroutine
base = Coroutine
else:
base = object
class _BaseRequestContextManager(base):
__slots__ = ('_coro', '_resp')
def __init__(self, coro):
self._coro = coro
self._resp = None
def send(self, value):
return self._coro.send(value)
def throw(self, typ, val=None, tb=None):
if val is None:
return self._coro.throw(typ)
elif tb is None:
return self._coro.throw(typ, val)
else:
return self._coro.throw(typ, val, tb)
def close(self):
return self._coro.close()
@property
def gi_frame(self):
return self._coro.gi_frame
@property
def gi_running(self):
return self._coro.gi_running
@property
def gi_code(self):
return self._coro.gi_code
def __next__(self):
return self.send(None)
@asyncio.coroutine
def __iter__(self):
resp = yield from self._coro
return resp
if PY_35:
def __await__(self):
resp = yield from self._coro
return resp
@asyncio.coroutine
def __aenter__(self):
self._resp = yield from self._coro
return self._resp
if not PY_35:
try:
from asyncio import coroutines
coroutines._COROUTINE_TYPES += (_BaseRequestContextManager,)
except:
pass
class _RequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
if exc_type is not None:
self._resp.close()
else:
yield from self._resp.release()
class _WSRequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
yield from self._resp.close()
class _DetachedRequestContextManager(_RequestContextManager):
__slots__ = _RequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
@asyncio.coroutine
def __iter__(self):
try:
return (yield from self._coro)
except:
self._session.close()
raise
if PY_35:
def __await__(self):
try:
return (yield from self._coro)
except:
self._session.close()
raise
def __del__(self):
self._session.detach()
class _DetachedWSRequestContextManager(_WSRequestContextManager):
__slots__ = _WSRequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
def __del__(self):
self._session.detach()
def request(method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
cookies=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
connector=None,
loop=None,
read_until_eof=True,
request_class=None,
response_class=None):
"""Constructs and sends a request. Returns response object.
:param str method: HTTP method
:param str url: request url
:param params: (optional) Dictionary or bytes to be sent in the query
string of the new request
:param data: (optional) Dictionary, bytes, or file-like object to
send in the body of the request
:param dict headers: (optional) Dictionary of HTTP Headers to send with
the request
:param dict cookies: (optional) Dict object to send with the request
:param auth: (optional) BasicAuth named tuple represent HTTP Basic Auth
:type auth: aiohttp.helpers.BasicAuth
:param bool allow_redirects: (optional) If set to False, do not follow
redirects
:param version: Request HTTP version.
:type version: aiohttp.protocol.HttpVersion
:param bool compress: Set to True if request has to be compressed
with deflate encoding.
:param chunked: Set to chunk size for chunked transfer encoding.
:type chunked: bool or int
:param bool expect100: Expect 100-continue response from server.
:param connector: BaseConnector sub-class instance to support
connection pooling.
:type connector: aiohttp.connector.BaseConnector
:param bool read_until_eof: Read response until eof if response
does not have Content-Length header.
:param request_class: (optional) Custom Request class implementation.
:param response_class: (optional) Custom Response class implementation.
:param loop: Optional event loop.
Usage::
>>> import aiohttp
>>> resp = yield from aiohttp.request('GET', 'http://python.org/')
>>> resp
<ClientResponse(python.org/) [200]>
>>> data = yield from resp.read()
"""
warnings.warn("Use ClientSession().request() instead", DeprecationWarning)
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
kwargs = {}
if request_class is not None:
kwargs['request_class'] = request_class
if response_class is not None:
kwargs['response_class'] = response_class
session = ClientSession(loop=loop,
cookies=cookies,
connector=connector,
**kwargs)
return _DetachedRequestContextManager(
session._request(method, url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof),
session=session)
def get(url, **kwargs):
warnings.warn("Use ClientSession().get() instead", DeprecationWarning)
return request(hdrs.METH_GET, url, **kwargs)
def options(url, **kwargs):
warnings.warn("Use ClientSession().options() instead", DeprecationWarning)
return request(hdrs.METH_OPTIONS, url, **kwargs)
def head(url, **kwargs):
warnings.warn("Use ClientSession().head() instead", DeprecationWarning)
return request(hdrs.METH_HEAD, url, **kwargs)
def post(url, **kwargs):
warnings.warn("Use ClientSession().post() instead", DeprecationWarning)
return request(hdrs.METH_POST, url, **kwargs)
def put(url, **kwargs):
warnings.warn("Use ClientSession().put() instead", DeprecationWarning)
return request(hdrs.METH_PUT, url, **kwargs)
def patch(url, **kwargs):
warnings.warn("Use ClientSession().patch() instead", DeprecationWarning)
return request(hdrs.METH_PATCH, url, **kwargs)
def delete(url, **kwargs):
warnings.warn("Use ClientSession().delete() instead", DeprecationWarning)
return request(hdrs.METH_DELETE, url, **kwargs)
def ws_connect(url, *, protocols=(), timeout=10.0, connector=None, auth=None,
ws_response_class=ClientWebSocketResponse, autoclose=True,
autoping=True, loop=None, origin=None, headers=None):
warnings.warn("Use ClientSession().ws_connect() instead",
DeprecationWarning)
if loop is None:
loop = asyncio.get_event_loop()
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
session = aiohttp.ClientSession(loop=loop, connector=connector, auth=auth,
ws_response_class=ws_response_class,
headers=headers)
return _DetachedWSRequestContextManager(
session._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
origin=origin),
session=session)
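# Usage sketch (illustrative only, not part of the module API): a minimal GET with
# ClientSession in the pre-3.5 generator style used throughout this file. The URL
# passed in is hypothetical; the session is always closed.
@asyncio.coroutine
def _example_fetch(url, *, loop=None):
    session = ClientSession(loop=loop)
    try:
        resp = yield from session.get(url)
        try:
            body = yield from resp.read()
        finally:
            yield from resp.release()
        return body
    finally:
        session.close()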
|
|
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RPC/IPC support."""
import abc
import http.client
import io
from avro import io as avro_io
from avro import protocol
from avro import schema
# ------------------------------------------------------------------------------
# Constants
HANDSHAKE_REQUEST_SCHEMA = schema.Parse("""
{
"type": "record",
"name": "HandshakeRequest", "namespace":"org.apache.avro.ipc",
"fields": [
{"name": "clientHash",
"type": {"type": "fixed", "name": "MD5", "size": 16}},
{"name": "clientProtocol", "type": ["null", "string"]},
{"name": "serverHash", "type": "MD5"},
{"name": "meta", "type": ["null", {"type": "map", "values": "bytes"}]}
]
}
""")
HANDSHAKE_RESPONSE_SCHEMA = schema.Parse("""
{
"type": "record",
"name": "HandshakeResponse", "namespace": "org.apache.avro.ipc",
"fields": [
{"name": "match",
"type": {"type": "enum", "name": "HandshakeMatch",
"symbols": ["BOTH", "CLIENT", "NONE"]}},
{"name": "serverProtocol",
"type": ["null", "string"]},
{"name": "serverHash",
"type": ["null", {"type": "fixed", "name": "MD5", "size": 16}]},
{"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}]}
]
}
""")
HANDSHAKE_REQUESTOR_WRITER = avro_io.DatumWriter(HANDSHAKE_REQUEST_SCHEMA)
HANDSHAKE_REQUESTOR_READER = avro_io.DatumReader(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_WRITER = avro_io.DatumWriter(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_READER = avro_io.DatumReader(HANDSHAKE_REQUEST_SCHEMA)
META_SCHEMA = schema.Parse('{"type": "map", "values": "bytes"}')
META_WRITER = avro_io.DatumWriter(META_SCHEMA)
META_READER = avro_io.DatumReader(META_SCHEMA)
SYSTEM_ERROR_SCHEMA = schema.Parse('["string"]')
# protocol cache
REMOTE_HASHES = {}
REMOTE_PROTOCOLS = {}
# Decoder/encoder for a 32-bit big-endian integer.
UINT32_BE = avro_io.STRUCT_INT
# Default size of the buffers used to frame messages:
BUFFER_SIZE = 8192
# ------------------------------------------------------------------------------
# Exceptions
class AvroRemoteException(schema.AvroException):
"""
Raised when an error message is sent by an Avro requestor or responder.
"""
def __init__(self, fail_msg=None):
schema.AvroException.__init__(self, fail_msg)
class ConnectionClosedException(schema.AvroException):
pass
# ------------------------------------------------------------------------------
# Base IPC Classes (Requestor/Responder)
class BaseRequestor(object):
"""Base class for the client side of a protocol interaction."""
def __init__(self, local_protocol, transceiver):
self._local_protocol = local_protocol
self._transceiver = transceiver
self._remote_protocol = None
self._remote_hash = None
self._send_protocol = None
@property
def local_protocol(self):
return self._local_protocol
@property
def transceiver(self):
return self._transceiver
# read/write properties
def set_remote_protocol(self, new_remote_protocol):
self._remote_protocol = new_remote_protocol
REMOTE_PROTOCOLS[self.transceiver.remote_name] = self.remote_protocol
remote_protocol = property(lambda self: self._remote_protocol,
set_remote_protocol)
def set_remote_hash(self, new_remote_hash):
self._remote_hash = new_remote_hash
REMOTE_HASHES[self.transceiver.remote_name] = self.remote_hash
remote_hash = property(lambda self: self._remote_hash, set_remote_hash)
def set_send_protocol(self, new_send_protocol):
self._send_protocol = new_send_protocol
send_protocol = property(lambda self: self._send_protocol, set_send_protocol)
def request(self, message_name, request_datum):
"""Writes a request message and reads a response or error message.
Args:
message_name: Name of the IPC method.
request_datum: IPC request.
Returns:
The IPC response.
"""
# build handshake and call request
buffer_writer = io.BytesIO()
buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
self.write_handshake_request(buffer_encoder)
self.write_call_request(message_name, request_datum, buffer_encoder)
# send the handshake and call request; block until call response
call_request = buffer_writer.getvalue()
return self.issue_request(call_request, message_name, request_datum)
def write_handshake_request(self, encoder):
local_hash = self.local_protocol.md5
remote_name = self.transceiver.remote_name
remote_hash = REMOTE_HASHES.get(remote_name)
if remote_hash is None:
remote_hash = local_hash
self.remote_protocol = self.local_protocol
request_datum = {}
request_datum['clientHash'] = local_hash
request_datum['serverHash'] = remote_hash
if self.send_protocol:
request_datum['clientProtocol'] = str(self.local_protocol)
HANDSHAKE_REQUESTOR_WRITER.write(request_datum, encoder)
def write_call_request(self, message_name, request_datum, encoder):
"""
The format of a call request is:
* request metadata, a map with values of type bytes
* the message name, an Avro string, followed by
* the message parameters. Parameters are serialized according to
the message's request declaration.
"""
# request metadata (not yet implemented)
request_metadata = {}
META_WRITER.write(request_metadata, encoder)
# message name
message = self.local_protocol.messages.get(message_name)
if message is None:
raise schema.AvroException('Unknown message: %s' % message_name)
encoder.write_utf8(message.name)
# message parameters
self.write_request(message.request, request_datum, encoder)
def write_request(self, request_schema, request_datum, encoder):
datum_writer = avro_io.DatumWriter(request_schema)
datum_writer.write(request_datum, encoder)
def read_handshake_response(self, decoder):
handshake_response = HANDSHAKE_REQUESTOR_READER.read(decoder)
match = handshake_response.get('match')
if match == 'BOTH':
self.send_protocol = False
return True
elif match == 'CLIENT':
if self.send_protocol:
raise schema.AvroException('Handshake failure.')
self.remote_protocol = protocol.Parse(
handshake_response.get('serverProtocol'))
self.remote_hash = handshake_response.get('serverHash')
self.send_protocol = False
return True
elif match == 'NONE':
if self.send_protocol:
raise schema.AvroException('Handshake failure.')
self.remote_protocol = protocol.Parse(
handshake_response.get('serverProtocol'))
self.remote_hash = handshake_response.get('serverHash')
self.send_protocol = True
return False
else:
raise schema.AvroException('Unexpected match: %s' % match)
def read_call_response(self, message_name, decoder):
"""
The format of a call response is:
* response metadata, a map with values of type bytes
* a one-byte error flag boolean, followed by either:
o if the error flag is false,
the message response, serialized per the message's response schema.
o if the error flag is true,
the error, serialized per the message's error union schema.
"""
# response metadata
response_metadata = META_READER.read(decoder)
# remote response schema
remote_message_schema = self.remote_protocol.messages.get(message_name)
if remote_message_schema is None:
raise schema.AvroException('Unknown remote message: %s' % message_name)
# local response schema
local_message_schema = self.local_protocol.messages.get(message_name)
if local_message_schema is None:
raise schema.AvroException('Unknown local message: %s' % message_name)
# error flag
if not decoder.read_boolean():
writer_schema = remote_message_schema.response
reader_schema = local_message_schema.response
return self.read_response(writer_schema, reader_schema, decoder)
else:
writer_schema = remote_message_schema.errors
reader_schema = local_message_schema.errors
raise self.read_error(writer_schema, reader_schema, decoder)
def read_response(self, writer_schema, reader_schema, decoder):
datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
result = datum_reader.read(decoder)
return result
def read_error(self, writer_schema, reader_schema, decoder):
datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
return AvroRemoteException(datum_reader.read(decoder))
class Requestor(BaseRequestor):
def issue_request(self, call_request, message_name, request_datum):
call_response = self.transceiver.Transceive(call_request)
# process the handshake and call response
buffer_decoder = avro_io.BinaryDecoder(io.BytesIO(call_response))
call_response_exists = self.read_handshake_response(buffer_decoder)
if call_response_exists:
return self.read_call_response(message_name, buffer_decoder)
else:
return self.request(message_name, request_datum)
class Responder(object):
"""Base class for the server side of a protocol interaction."""
def __init__(self, local_protocol):
self._local_protocol = local_protocol
self._local_hash = self.local_protocol.md5
self._protocol_cache = {}
self.set_protocol_cache(self.local_hash, self.local_protocol)
# read-only properties
local_protocol = property(lambda self: self._local_protocol)
local_hash = property(lambda self: self._local_hash)
protocol_cache = property(lambda self: self._protocol_cache)
# utility functions to manipulate protocol cache
def get_protocol_cache(self, hash):
return self.protocol_cache.get(hash)
def set_protocol_cache(self, hash, protocol):
self.protocol_cache[hash] = protocol
def respond(self, call_request):
"""
Called by a server to deserialize a request, compute and serialize
a response or error. Compare to 'handle()' in Thrift.
"""
# call_request and the response are framed bytes, so binary buffers are required
buffer_reader = io.BytesIO(call_request)
buffer_decoder = avro_io.BinaryDecoder(buffer_reader)
buffer_writer = io.BytesIO()
buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
error = None
response_metadata = {}
try:
remote_protocol = self.process_handshake(buffer_decoder, buffer_encoder)
# handshake failure
if remote_protocol is None:
return buffer_writer.getvalue()
# read request using remote protocol
request_metadata = META_READER.read(buffer_decoder)
remote_message_name = buffer_decoder.read_utf8()
# get remote and local request schemas so we can do
# schema resolution (one fine day)
remote_message = remote_protocol.messages.get(remote_message_name)
if remote_message is None:
fail_msg = 'Unknown remote message: %s' % remote_message_name
raise schema.AvroException(fail_msg)
local_message = self.local_protocol.messages.get(remote_message_name)
if local_message is None:
fail_msg = 'Unknown local message: %s' % remote_message_name
raise schema.AvroException(fail_msg)
writer_schema = remote_message.request
reader_schema = local_message.request
request = self.read_request(writer_schema, reader_schema,
buffer_decoder)
# perform server logic
try:
response = self.invoke(local_message, request)
except AvroRemoteException as e:
error = e
except Exception as e:
error = AvroRemoteException(str(e))
# write response using local protocol
META_WRITER.write(response_metadata, buffer_encoder)
buffer_encoder.write_boolean(error is not None)
if error is None:
writer_schema = local_message.response
self.write_response(writer_schema, response, buffer_encoder)
else:
writer_schema = local_message.errors
self.write_error(writer_schema, error, buffer_encoder)
except schema.AvroException as e:
error = AvroRemoteException(str(e))
buffer_writer = io.BytesIO()
buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
META_WRITER.write(response_metadata, buffer_encoder)
buffer_encoder.write_boolean(True)
self.write_error(SYSTEM_ERROR_SCHEMA, error, buffer_encoder)
return buffer_writer.getvalue()
def process_handshake(self, decoder, encoder):
handshake_request = HANDSHAKE_RESPONDER_READER.read(decoder)
handshake_response = {}
# determine the remote protocol
client_hash = handshake_request.get('clientHash')
client_protocol = handshake_request.get('clientProtocol')
remote_protocol = self.get_protocol_cache(client_hash)
if remote_protocol is None and client_protocol is not None:
remote_protocol = protocol.Parse(client_protocol)
self.set_protocol_cache(client_hash, remote_protocol)
# evaluate remote's guess of the local protocol
server_hash = handshake_request.get('serverHash')
if self.local_hash == server_hash:
if remote_protocol is None:
handshake_response['match'] = 'NONE'
else:
handshake_response['match'] = 'BOTH'
else:
if remote_protocol is None:
handshake_response['match'] = 'NONE'
else:
handshake_response['match'] = 'CLIENT'
if handshake_response['match'] != 'BOTH':
handshake_response['serverProtocol'] = str(self.local_protocol)
handshake_response['serverHash'] = self.local_hash
HANDSHAKE_RESPONDER_WRITER.write(handshake_response, encoder)
return remote_protocol
def invoke(self, local_message, request):
"""
Actual work done by the server: cf. the handler in Thrift.
"""
pass
def read_request(self, writer_schema, reader_schema, decoder):
datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
return datum_reader.read(decoder)
def write_response(self, writer_schema, response_datum, encoder):
datum_writer = avro_io.DatumWriter(writer_schema)
datum_writer.write(response_datum, encoder)
def write_error(self, writer_schema, error_exception, encoder):
datum_writer = avro_io.DatumWriter(writer_schema)
datum_writer.write(str(error_exception), encoder)
# ------------------------------------------------------------------------------
# Framed message
class FramedReader(object):
"""Wrapper around a file-like object to read framed data."""
def __init__(self, reader):
self._reader = reader
def Read(self):
"""Reads one message from the configured reader.
Returns:
The message, as bytes.
"""
message = io.BytesIO()
message_size = self._ReadInt32()
while message_size > 0:
while message_size > 0:
data_bytes = self._reader.read(message_size)
if len(data_bytes) == 0:
raise ConnectionClosedException('Reader read 0 bytes.')
message.write(data_bytes)
message_size -= len(data_bytes)
message_size = self._ReadInt32()
return message.getvalue()
def _ReadInt32(self):
encoded = self._reader.read(UINT32_BE.size)
if len(encoded) != UINT32_BE.size:
raise ConnectionClosedException('Invalid header: %r' % encoded)
return UINT32_BE.unpack(encoded)[0]
class FramedWriter(object):
"""Wrapper around a file-like object to write framed data."""
def __init__(self, writer):
self._writer = writer
def Write(self, message):
"""Writes a message.
Args:
message: Message to write, as bytes.
"""
while len(message) > 0:
chunk_size = max(BUFFER_SIZE, len(message))
chunk = message[:chunk_size]
self._WriteBuffer(chunk)
message = message[chunk_size:]
# A message is always terminated by a zero-length buffer.
self._WriteUnsignedInt32(0)
def _WriteBuffer(self, chunk):
self._WriteUnsignedInt32(len(chunk))
self._writer.write(chunk)
def _WriteUnsignedInt32(self, uint32):
self._writer.write(UINT32_BE.pack(uint32))
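# Illustrative sketch (not part of the Avro API): round-tripping a message through
# the two wrappers above. Each frame is a big-endian uint32 length prefix followed
# by that many bytes; a zero-length frame terminates the message.
def _example_framing_roundtrip(message=b'hello avro'):
    buf = io.BytesIO()
    FramedWriter(buf).Write(message)
    buf.seek(0)
    return FramedReader(buf).Read() == message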
# ------------------------------------------------------------------------------
# Transceiver (send/receive channel)
class Transceiver(object, metaclass=abc.ABCMeta):
@abc.abstractproperty
def remote_name(self):
pass
@abc.abstractmethod
def ReadMessage(self):
"""Reads a single message from the channel.
Blocks until a message can be read.
Returns:
The message read from the channel.
"""
pass
@abc.abstractmethod
def WriteMessage(self, message):
"""Writes a message into the channel.
Blocks until the message has been written.
Args:
message: Message to write.
"""
pass
def Transceive(self, request):
"""Processes a single request-reply interaction.
Synchronous request-reply interaction.
Args:
request: Request message.
Returns:
The reply message.
"""
self.WriteMessage(request)
result = self.ReadMessage()
return result
def Close(self):
"""Closes this transceiver."""
pass
class HTTPTransceiver(Transceiver):
"""HTTP-based transceiver implementation."""
def __init__(self, host, port, req_resource='/'):
"""Initializes a new HTTP transceiver.
Args:
host: Name or IP address of the remote host to interact with.
port: Port the remote server is listening on.
req_resource: Optional HTTP resource path to use, '/' by default.
"""
self._req_resource = req_resource
self._conn = http.client.HTTPConnection(host, port)
self._conn.connect()
@property
def remote_name(self):
return self._conn.sock.getsockname()
def ReadMessage(self):
response = self._conn.getresponse()
response_reader = FramedReader(response)
framed_message = response_reader.Read()
response.read() # ensure we're ready for subsequent requests
return framed_message
def WriteMessage(self, message):
req_method = 'POST'
req_headers = {'Content-Type': 'avro/binary'}
bio = io.BytesIO()
req_body_buffer = FramedWriter(bio)
req_body_buffer.Write(message)
req_body = bio.getvalue()
self._conn.request(req_method, self._req_resource, req_body, req_headers)
def Close(self):
self._conn.close()
self._conn = None
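# Illustrative sketch (not part of the Avro API): a minimal client-side call using
# HTTPTransceiver with the Requestor defined above. The protocol JSON, host, port
# and the 'echo' message are all hypothetical.
def _example_echo_request(protocol_json, host, port):
    client = HTTPTransceiver(host, port)
    try:
        requestor = Requestor(protocol.Parse(protocol_json), client)
        return requestor.request('echo', {'message': 'hello'})
    finally:
        client.Close()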
# ------------------------------------------------------------------------------
# Server Implementations
|
|
import os
import sys
import zipfile
from cStringIO import StringIO
from os.path import basename, getsize, isfile, isdir, join
import metadata
import dist_naming
from requirement import Req, add_Reqs_to_spec, filter_name, dist_as_req
from enstaller.utils import (comparable_version, md5_file,
rm_rf, write_data_from_url)
from egginst.utils import pprint_fn_action
class Chain(object):
def __init__(self, repos=[], verbose=False):
self.verbose = verbose
# maps distributions to specs
self.index = {}
# Chain of repositories, either local or remote
self.repos = []
for repo in repos:
# These are file:// (optionally indexed) or http:// (indexed)
self.add_repo(repo)
if self.verbose:
self.print_repos()
def print_repos(self):
print 'Repositories:'
for r in self.repos:
print '\t%r' % r
def add_repo(self, repo, index_fn='index-depend.txt'):
"""
Add a repo to the chain, i.e. read the index file of the url,
parse it and update the index.
"""
if self.verbose:
print "Adding repository:", repo
repo = dist_naming.cleanup_reponame(repo)
self.repos.append(repo)
index_url = repo + index_fn
if index_url.startswith('file://'):
if isfile(index_url[7:]):
# A local url with index file
if self.verbose:
print "\tfound index", index_url
else:
# A local url without index file
self.index_all_files(repo)
return
if self.verbose:
print "\treading:", index_url
faux = StringIO()
write_data_from_url(faux, index_url)
index_data = faux.getvalue()
faux.close()
new_index = metadata.parse_depend_index(index_data)
for spec in new_index.itervalues():
add_Reqs_to_spec(spec)
for distname, spec in new_index.iteritems():
self.index[repo + distname] = spec
def get_matches_repo(self, req, repo):
"""
Return the set of distributions which match the requirement from a
specified repository.
"""
matches = set()
for dist, spec in self.index.iteritems():
if dist_naming.repo_dist(dist) == repo and req.matches(spec):
matches.add(dist)
return matches
def get_matches(self, req):
"""
Return the set of distributions which match the requirement from the
first repository in the chain which contains at least one match.
"""
for repo in self.repos:
matches = self.get_matches_repo(req, repo)
if matches:
return matches
# no matching distributions are found in any repo
return set()
def get_version_build(self, dist):
"""
        Returns a tuple (version, build) for a distribution; version is a
        RationalVersion object (see verlib). This method is used below to
        determine the distribution with the largest version and build number.
"""
return dist_naming.comparable_spec(self.index[dist])
def get_dist(self, req):
"""
        Return the distribution with the largest version and build number
from the first repository which contains any matches.
"""
lst = list(self.get_matches(req))
lst.sort(key=self.get_version_build)
if not lst:
return None
return lst[-1]
def reqs_dist(self, dist):
"""
Return the set of requirement objects of the distribution.
"""
return self.index[dist]['Reqs']
def select_new_reqs(self, reqs, dist):
"""
Selects new requirements, which are listed as dependencies in the
distribution 'dist', and are not already in the requirements 'reqs',
unless the distribution requires something more strict.
"""
result = set()
for r in self.reqs_dist(dist):
# from all the reqs (we already have collected) filter the
# ones with the same project name
rs2 = filter_name(reqs, r.name)
if rs2:
# if there are requirements for an existing project name,
# only add if it is more strict
for r2 in rs2:
if r2.strictness > r.strictness:
result.add(r2)
else:
# otherwise, just add it, there is no requirement for this
# project yet
result.add(r)
return result
def add_reqs(self, reqs, req, level=1):
"""
Finds requirements of 'req', recursively and adds them to 'reqs',
which is a dictionary mapping requirements to a
tuple(recursion level, distribution which requires the requirement)
"""
for dist in self.get_matches(req):
for r in self.select_new_reqs(reqs, dist):
if r in reqs:
continue
reqs[r] = (level, dist)
self.add_reqs(reqs, r, level + 1)
def get_reqs(self, req):
"""
Returns a dictionary mapping all requirements found recursively
to the distribution which requires it.
"""
# the root requirement (in the argument) itself maps to recursion
        # level 0 and a non-existent distribution (because it is required by
        # the caller of this function and not by any other distribution)
assert req.strictness == 3, req
reqs1 = {req: (0, 'ROOT')}
# add all requirements for the root requirement
self.add_reqs(reqs1, req)
if self.verbose:
print "Requirements: (-level, strictness)"
for r in sorted(reqs1):
print '\t%-33r %3i %3i' % (r, -reqs1[r][0], r.strictness)
reqs2 = {}
for name in set(r.name for r in reqs1):
# get all requirements for the name
rs = []
for r in filter_name(reqs1, name):
# append a tuple with:
# * tuple(negative recursion level, strictness)
# * requirement itself
# * distribution requiring it
rs.append(((-reqs1[r][0], r.strictness), r, reqs1[r][1]))
rs.sort()
r, d = rs[-1][1:]
reqs2[r] = d
return reqs2
def install_order(self, req, recur=True):
"""
Return the list of distributions which need to be installed.
The returned list is given in dependency order, i.e. the
distributions can be installed in this order without any package
        being installed before its dependencies are installed.
"""
if self.verbose:
print "Determining install order for %r" % req
dist_required = self.get_dist(req)
if dist_required is None:
return None
if not recur:
return [dist_required]
req = dist_as_req(dist_required)
if self.verbose:
print dist_required
print "Requirement: %r" % req
dists = []
for r, d in self.get_reqs(req).iteritems():
dist = self.get_dist(r)
if dist:
dists.append(dist)
continue
print 'ERROR: No distribution found for: %r' % r
if d != 'ROOT':
print ' required by: %s' % d
sys.exit(1)
# the distributions corresponding to the requirements must be sorted
# because the output of this function is otherwise not deterministic
dists.sort()
# maps dist -> set of required (project) names
rns = {}
for dist in dists:
rns[dist] = set(r.name for r in self.reqs_dist(dist))
# As long as we have things missing, simply look for things which
# can be added, i.e. all the requirements have been added already
res = []
names_inst = set()
while len(res) < len(dists):
n = len(res)
for dist in dists:
if dist in res:
continue
# see if all required packages were added already
if all(bool(n in names_inst) for n in rns[dist]):
res.append(dist)
names_inst.add(self.index[dist]['cname'])
assert len(names_inst) == len(res)
if len(res) == n:
# nothing was added
raise Exception("Loop in the dependency graph")
return res
def list_versions(self, name):
"""
        Given the name of a package, returns a sorted list of versions for
package `name` found in any repo.
"""
versions = set()
req = Req(name)
for spec in self.index.itervalues():
if req.matches(spec):
versions.add(spec['version'])
return sorted(versions, key=comparable_version)
def fetch_dist(self, dist, fetch_dir, force=False, check_md5=False,
dry_run=False):
"""
Get a distribution, i.e. copy or download the distribution into
fetch_dir.
force:
force download or copy
check_md5:
when determining if a file needs to be downloaded or copied,
            check its MD5. This is, of course, slower but more reliable
            than just checking the file size (which is always done first).
Note:
          * This option has nothing to do with checking the
MD5 of a download. The md5 is always checked when files are
downloaded (regardless of this option).
          * If force=True, this option has no effect, because the file
is forcefully downloaded, ignoring any existing file (as well
as the MD5).
"""
md5 = self.index[dist].get('md5', None)
size = self.index[dist].get('size', None)
fn = dist_naming.filename_dist(dist)
dst = join(fetch_dir, fn)
# if force is not used, see if (i) the file exists (ii) its size is
# the expected (iii) optionally, make sure the md5 is the expected.
if (not force and isfile(dst) and getsize(dst) == size and
(not check_md5 or md5_file(dst) == md5)):
if self.verbose:
print "Not forcing refetch, %r already exists" % dst
return
pprint_fn_action(fn,
['copying', 'downloading'][dist.startswith('http://')])
if dry_run:
return
if self.verbose:
print "Copying: %r" % dist
print " to: %r" % dst
fo = open(dst + '.part', 'wb')
write_data_from_url(fo, dist, md5, size)
fo.close()
rm_rf(dst)
os.rename(dst + '.part', dst)
def dirname_repo(self, repo):
if repo.startswith('file://'):
return repo[7:].rstrip(r'\/')
return None
def index_file(self, filename, repo):
"""
        Add an unindexed distribution, which must already exist in a local
        repository, to the index (in memory). Note that the index file on
disk remains unchanged.
"""
assert filename == basename(filename), filename
dist = repo + filename
if self.verbose:
print "Adding %r to index" % dist
arcname = 'EGG-INFO/spec/depend'
z = zipfile.ZipFile(join(self.dirname_repo(repo), filename))
if arcname not in z.namelist():
z.close()
raise Exception("zipfile %r has no arcname=%r" %
(filename, arcname))
spec = metadata.parse_data(z.read(arcname))
z.close()
add_Reqs_to_spec(spec)
self.index[dist] = spec
def index_all_files(self, repo):
"""
Add all distributions to the index, see index_file() above.
Note that no index file is written to disk.
"""
dir_path = self.dirname_repo(repo)
assert isdir(dir_path), dir_path
for fn in os.listdir(dir_path):
if not fn.endswith('.egg'):
continue
if not dist_naming.is_valid_eggname(fn):
print "WARNING: ignoring invalid egg name:", join(dir_path, fn)
continue
self.index_file(fn, repo)
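# Hedged usage sketch (not part of the original module): the repository URL and
# requirement name below are made-up examples. Chain resolves the requirement
# against the chained repositories and returns an install order, dependencies
# first.
def _example_install_order():
    c = Chain(['file:///opt/eggs/repo/'], verbose=True)
    return c.install_order(Req('numpy'))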
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
KubernetesExecutor
.. seealso::
For more information on how the KubernetesExecutor works, take a look at the guide:
:ref:`executor:KubernetesExecutor`
"""
import functools
import json
import multiprocessing
import time
from datetime import timedelta
from queue import Empty, Queue
from typing import Any, Dict, List, Optional, Tuple
from kubernetes import client, watch
from kubernetes.client import Configuration, models as k8s
from kubernetes.client.rest import ApiException
from urllib3.exceptions import ReadTimeoutError
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import NOT_STARTED_MESSAGE, BaseExecutor, CommandType
from airflow.kubernetes import pod_generator
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.kube_config import KubeConfig
from airflow.kubernetes.kubernetes_helper_functions import annotations_to_key, create_pod_id
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
from airflow.settings import pod_mutation_hook
from airflow.utils import timezone
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
# TaskInstance key, command, configuration, pod_template_file
KubernetesJobType = Tuple[TaskInstanceKey, CommandType, Any, Optional[str]]
# key, state, pod_id, namespace, resource_version
KubernetesResultsType = Tuple[TaskInstanceKey, Optional[str], str, str, str]
# pod_id, namespace, state, annotations, resource_version
KubernetesWatchType = Tuple[str, str, Optional[str], Dict[str, str], str]
class ResourceVersion:
"""Singleton for tracking resourceVersion from Kubernetes"""
_instance = None
resource_version = "0"
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
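# Hedged illustration (not part of the original module): because ResourceVersion
# is a process-wide singleton, every construction returns the same object, so the
# watcher and the executor observe the same resource_version value.
def _resource_version_is_shared() -> bool:
    first, second = ResourceVersion(), ResourceVersion()
    first.resource_version = "42"  # hypothetical value
    return first is second and second.resource_version == "42"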
class KubernetesJobWatcher(multiprocessing.Process, LoggingMixin):
"""Watches for Kubernetes jobs"""
def __init__(
self,
namespace: Optional[str],
multi_namespace_mode: bool,
watcher_queue: 'Queue[KubernetesWatchType]',
resource_version: Optional[str],
scheduler_job_id: str,
kube_config: Configuration,
):
super().__init__()
self.namespace = namespace
self.multi_namespace_mode = multi_namespace_mode
self.scheduler_job_id = scheduler_job_id
self.watcher_queue = watcher_queue
self.resource_version = resource_version
self.kube_config = kube_config
def run(self) -> None:
"""Performs watching"""
kube_client: client.CoreV1Api = get_kube_client()
if not self.scheduler_job_id:
raise AirflowException(NOT_STARTED_MESSAGE)
while True:
try:
self.resource_version = self._run(
kube_client, self.resource_version, self.scheduler_job_id, self.kube_config
)
except ReadTimeoutError:
self.log.warning(
"There was a timeout error accessing the Kube API. Retrying request.", exc_info=True
)
time.sleep(1)
except Exception:
self.log.exception('Unknown error in KubernetesJobWatcher. Failing')
raise
else:
self.log.warning(
'Watch died gracefully, starting back up with: last resource_version: %s',
self.resource_version,
)
def _run(
self,
kube_client: client.CoreV1Api,
resource_version: Optional[str],
scheduler_job_id: str,
kube_config: Any,
) -> Optional[str]:
self.log.info('Event: and now my watch begins starting at resource_version: %s', resource_version)
watcher = watch.Watch()
kwargs = {'label_selector': f'airflow-worker={scheduler_job_id}'}
if resource_version:
kwargs['resource_version'] = resource_version
if kube_config.kube_client_request_args:
for key, value in kube_config.kube_client_request_args.items():
kwargs[key] = value
last_resource_version: Optional[str] = None
if self.multi_namespace_mode:
list_worker_pods = functools.partial(
watcher.stream, kube_client.list_pod_for_all_namespaces, **kwargs
)
else:
list_worker_pods = functools.partial(
watcher.stream, kube_client.list_namespaced_pod, self.namespace, **kwargs
)
for event in list_worker_pods():
task = event['object']
self.log.info('Event: %s had an event of type %s', task.metadata.name, event['type'])
if event['type'] == 'ERROR':
return self.process_error(event)
annotations = task.metadata.annotations
task_instance_related_annotations = {
'dag_id': annotations['dag_id'],
'task_id': annotations['task_id'],
'execution_date': annotations.get('execution_date'),
'run_id': annotations.get('run_id'),
'try_number': annotations['try_number'],
}
map_index = annotations.get('map_index')
if map_index is not None:
task_instance_related_annotations['map_index'] = map_index
self.process_status(
pod_id=task.metadata.name,
namespace=task.metadata.namespace,
status=task.status.phase,
annotations=task_instance_related_annotations,
resource_version=task.metadata.resource_version,
event=event,
)
last_resource_version = task.metadata.resource_version
return last_resource_version
def process_error(self, event: Any) -> str:
"""Process error response"""
self.log.error('Encountered Error response from k8s list namespaced pod stream => %s', event)
raw_object = event['raw_object']
if raw_object['code'] == 410:
self.log.info(
'Kubernetes resource version is too old, must reset to 0 => %s', (raw_object['message'],)
)
# Return resource version 0
return '0'
raise AirflowException(
f"Kubernetes failure for {raw_object['reason']} with code {raw_object['code']} and message: "
f"{raw_object['message']}"
)
def process_status(
self,
pod_id: str,
namespace: str,
status: str,
annotations: Dict[str, str],
resource_version: str,
event: Any,
) -> None:
"""Process status response"""
if status == 'Pending':
if event['type'] == 'DELETED':
self.log.info('Event: Failed to start pod %s', pod_id)
self.watcher_queue.put((pod_id, namespace, State.FAILED, annotations, resource_version))
else:
self.log.info('Event: %s Pending', pod_id)
elif status == 'Failed':
self.log.error('Event: %s Failed', pod_id)
self.watcher_queue.put((pod_id, namespace, State.FAILED, annotations, resource_version))
elif status == 'Succeeded':
self.log.info('Event: %s Succeeded', pod_id)
self.watcher_queue.put((pod_id, namespace, None, annotations, resource_version))
elif status == 'Running':
if event['type'] == 'DELETED':
self.log.info('Event: Pod %s deleted before it could complete', pod_id)
self.watcher_queue.put((pod_id, namespace, State.FAILED, annotations, resource_version))
else:
self.log.info('Event: %s is Running', pod_id)
else:
self.log.warning(
'Event: Invalid state: %s on pod: %s in namespace %s with annotations: %s with '
'resource_version: %s',
status,
pod_id,
namespace,
annotations,
resource_version,
)
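# Hedged summary (not part of the original module) of the phase-to-state mapping
# implemented by KubernetesJobWatcher.process_status above:
#   Pending   + DELETED event -> State.FAILED (pod never started)
#   Failed                    -> State.FAILED
#   Succeeded                 -> None (interpreted as success downstream)
#   Running   + DELETED event -> State.FAILED (pod removed mid-run)
#   anything else             -> logged as an invalid state and ignored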
class AirflowKubernetesScheduler(LoggingMixin):
"""Airflow Scheduler for Kubernetes"""
def __init__(
self,
kube_config: Any,
task_queue: 'Queue[KubernetesJobType]',
result_queue: 'Queue[KubernetesResultsType]',
kube_client: client.CoreV1Api,
scheduler_job_id: str,
):
super().__init__()
self.log.debug("Creating Kubernetes executor")
self.kube_config = kube_config
self.task_queue = task_queue
self.result_queue = result_queue
self.namespace = self.kube_config.kube_namespace
self.log.debug("Kubernetes using namespace %s", self.namespace)
self.kube_client = kube_client
self._manager = multiprocessing.Manager()
self.watcher_queue = self._manager.Queue()
self.scheduler_job_id = scheduler_job_id
self.kube_watcher = self._make_kube_watcher()
def run_pod_async(self, pod: k8s.V1Pod, **kwargs):
"""Runs POD asynchronously"""
pod_mutation_hook(pod)
sanitized_pod = self.kube_client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug('Pod Creation Request: \n%s', json_pod)
try:
resp = self.kube_client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug('Pod Creation Response: %s', resp)
except Exception as e:
self.log.exception('Exception when attempting to create Namespaced Pod: %s', json_pod)
raise e
return resp
def _make_kube_watcher(self) -> KubernetesJobWatcher:
resource_version = ResourceVersion().resource_version
watcher = KubernetesJobWatcher(
watcher_queue=self.watcher_queue,
namespace=self.kube_config.kube_namespace,
multi_namespace_mode=self.kube_config.multi_namespace_mode,
resource_version=resource_version,
scheduler_job_id=self.scheduler_job_id,
kube_config=self.kube_config,
)
watcher.start()
return watcher
def _health_check_kube_watcher(self):
if self.kube_watcher.is_alive():
self.log.debug("KubeJobWatcher alive, continuing")
else:
self.log.error(
'Error while health checking kube watcher process. Process died for unknown reasons'
)
self.kube_watcher = self._make_kube_watcher()
def run_next(self, next_job: KubernetesJobType) -> None:
"""
The run_next command will check the task_queue for any un-run jobs.
It will then create a unique job-id, launch that job in the cluster,
and store relevant info in the current_jobs map so we can track the job's
status
"""
self.log.info('Kubernetes job is %s', str(next_job).replace("\n", " "))
key, command, kube_executor_config, pod_template_file = next_job
dag_id, task_id, run_id, try_number, map_index = key
if command[0:3] != ["airflow", "tasks", "run"]:
raise ValueError('The command must start with ["airflow", "tasks", "run"].')
base_worker_pod = get_base_pod_from_template(pod_template_file, self.kube_config)
if not base_worker_pod:
raise AirflowException(
f"could not find a valid worker template yaml at {self.kube_config.pod_template_file}"
)
pod = PodGenerator.construct_pod(
namespace=self.namespace,
scheduler_job_id=self.scheduler_job_id,
pod_id=create_pod_id(dag_id, task_id),
dag_id=dag_id,
task_id=task_id,
kube_image=self.kube_config.kube_image,
try_number=try_number,
map_index=map_index,
date=None,
run_id=run_id,
args=command,
pod_override_object=kube_executor_config,
base_worker_pod=base_worker_pod,
)
# Reconcile the pod generated by the Operator and the Pod
# generated by the .cfg file
self.log.debug("Kubernetes running for command %s", command)
self.log.debug("Kubernetes launching image %s", pod.spec.containers[0].image)
# the watcher will monitor pods, so we do not block.
self.run_pod_async(pod, **self.kube_config.kube_client_request_args)
self.log.debug("Kubernetes Job created!")
def delete_pod(self, pod_id: str, namespace: str) -> None:
"""Deletes POD"""
try:
self.log.debug("Deleting pod %s in namespace %s", pod_id, namespace)
self.kube_client.delete_namespaced_pod(
pod_id,
namespace,
body=client.V1DeleteOptions(**self.kube_config.delete_option_kwargs),
**self.kube_config.kube_client_request_args,
)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
def sync(self) -> None:
"""
The sync function checks the status of all currently running kubernetes jobs.
If a job is completed, its status is placed in the result queue to
be sent back to the scheduler.
:return:
"""
self.log.debug("Syncing KubernetesExecutor")
self._health_check_kube_watcher()
while True:
try:
task = self.watcher_queue.get_nowait()
try:
self.log.debug("Processing task %s", task)
self.process_watcher_task(task)
finally:
self.watcher_queue.task_done()
except Empty:
break
def process_watcher_task(self, task: KubernetesWatchType) -> None:
"""Process the task by watcher."""
pod_id, namespace, state, annotations, resource_version = task
self.log.info(
'Attempting to finish pod; pod_id: %s; state: %s; annotations: %s', pod_id, state, annotations
)
key = annotations_to_key(annotations=annotations)
if key:
self.log.debug('finishing job %s - %s (%s)', key, state, pod_id)
self.result_queue.put((key, state, pod_id, namespace, resource_version))
def _flush_watcher_queue(self) -> None:
self.log.debug('Executor shutting down, watcher_queue approx. size=%d', self.watcher_queue.qsize())
while True:
try:
task = self.watcher_queue.get_nowait()
# Ignoring it since it can only have either FAILED or SUCCEEDED pods
self.log.warning('Executor shutting down, IGNORING watcher task=%s', task)
self.watcher_queue.task_done()
except Empty:
break
def terminate(self) -> None:
"""Terminates the watcher."""
self.log.debug("Terminating kube_watcher...")
self.kube_watcher.terminate()
self.kube_watcher.join()
self.log.debug("kube_watcher=%s", self.kube_watcher)
self.log.debug("Flushing watcher_queue...")
self._flush_watcher_queue()
# Queue should be empty...
self.watcher_queue.join()
self.log.debug("Shutting down manager...")
self._manager.shutdown()
def get_base_pod_from_template(pod_template_file: Optional[str], kube_config: Any) -> k8s.V1Pod:
"""
Reads either the pod_template_file set in the executor_config or the base pod_template_file
set in the airflow.cfg to craft a "base pod" that will be used by the KubernetesExecutor
:param pod_template_file: absolute path to a pod_template_file.yaml or None
:param kube_config: The KubeConfig class generated by airflow that contains all kube metadata
:return: a V1Pod that can be used as the base pod for k8s tasks
"""
if pod_template_file:
return PodGenerator.deserialize_model_file(pod_template_file)
else:
return PodGenerator.deserialize_model_file(kube_config.pod_template_file)
class KubernetesExecutor(BaseExecutor):
"""Executor for Kubernetes"""
supports_ad_hoc_ti_run: bool = True
def __init__(self):
self.kube_config = KubeConfig()
self._manager = multiprocessing.Manager()
self.task_queue: 'Queue[KubernetesJobType]' = self._manager.Queue()
self.result_queue: 'Queue[KubernetesResultsType]' = self._manager.Queue()
self.kube_scheduler: Optional[AirflowKubernetesScheduler] = None
self.kube_client: Optional[client.CoreV1Api] = None
self.scheduler_job_id: Optional[str] = None
self.event_scheduler: Optional[EventScheduler] = None
self.last_handled: Dict[TaskInstanceKey, float] = {}
super().__init__(parallelism=self.kube_config.parallelism)
@provide_session
def clear_not_launched_queued_tasks(self, session=None) -> None:
"""
Tasks can end up in a "Queued" state through either the executor being
abruptly shut down (leaving a non-empty task_queue on this executor)
or when a rescheduled/deferred operator comes back up for execution
(with the same try_number) before the pod of its previous incarnation
has been fully removed (we think).
This method checks each of those tasks to see if the corresponding pod
is around, and if not, and there's no matching entry in our own
task_queue, marks it for re-execution.
"""
self.log.debug("Clearing tasks that have not been launched")
if not self.kube_client:
raise AirflowException(NOT_STARTED_MESSAGE)
queued_tis: List[TaskInstance] = (
session.query(TaskInstance).filter(TaskInstance.state == State.QUEUED).all()
)
self.log.info('Found %s queued task instances', len(queued_tis))
# Go through the "last seen" dictionary and clean out old entries
allowed_age = self.kube_config.worker_pods_queued_check_interval * 3
for key, timestamp in list(self.last_handled.items()):
if time.time() - timestamp > allowed_age:
del self.last_handled[key]
for ti in queued_tis:
self.log.debug("Checking task instance %s", ti)
# Check to see if we've handled it ourselves recently
if ti.key in self.last_handled:
continue
# Build the pod selector
base_label_selector = (
f"dag_id={pod_generator.make_safe_label_value(ti.dag_id)},"
f"task_id={pod_generator.make_safe_label_value(ti.task_id)},"
f"airflow-worker={pod_generator.make_safe_label_value(str(ti.queued_by_job_id))}"
)
if ti.map_index >= 0:
# Old tasks _couldn't_ be mapped, so we don't have to worry about compat
base_label_selector += f',map_index={ti.map_index}'
kwargs = dict(label_selector=base_label_selector)
if self.kube_config.kube_client_request_args:
kwargs.update(**self.kube_config.kube_client_request_args)
# Try run_id first
kwargs['label_selector'] += ',run_id=' + pod_generator.make_safe_label_value(ti.run_id)
pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs)
if pod_list.items:
continue
# Fallback to old style of using execution_date
kwargs['label_selector'] = (
f'{base_label_selector},'
f'execution_date={pod_generator.datetime_to_label_safe_datestring(ti.execution_date)}'
)
pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs)
if pod_list.items:
continue
self.log.info('TaskInstance: %s found in queued state but was not launched, rescheduling', ti)
session.query(TaskInstance).filter(
TaskInstance.dag_id == ti.dag_id,
TaskInstance.task_id == ti.task_id,
TaskInstance.run_id == ti.run_id,
).update({TaskInstance.state: State.SCHEDULED})
def start(self) -> None:
"""Starts the executor"""
self.log.info('Start Kubernetes executor')
if not self.job_id:
raise AirflowException("Could not get scheduler_job_id")
self.scheduler_job_id = str(self.job_id)
self.log.debug('Start with scheduler_job_id: %s', self.scheduler_job_id)
self.kube_client = get_kube_client()
self.kube_scheduler = AirflowKubernetesScheduler(
self.kube_config, self.task_queue, self.result_queue, self.kube_client, self.scheduler_job_id
)
self.event_scheduler = EventScheduler()
self.event_scheduler.call_regular_interval(
self.kube_config.worker_pods_pending_timeout_check_interval,
self._check_worker_pods_pending_timeout,
)
self.event_scheduler.call_regular_interval(
self.kube_config.worker_pods_queued_check_interval,
self.clear_not_launched_queued_tasks,
)
# We also call this at startup as that's the most likely time to see
# stuck queued tasks
self.clear_not_launched_queued_tasks()
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
) -> None:
"""Executes task asynchronously"""
self.log.info('Add task %s with command %s with executor_config %s', key, command, executor_config)
try:
kube_executor_config = PodGenerator.from_obj(executor_config)
except Exception:
self.log.error("Invalid executor_config for %s", key)
self.fail(key=key, info="Invalid executor_config passed")
return
if executor_config:
pod_template_file = executor_config.get("pod_template_file", None)
else:
pod_template_file = None
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.event_buffer[key] = (State.QUEUED, self.scheduler_job_id)
self.task_queue.put((key, command, kube_executor_config, pod_template_file))
# We keep a temporary local record that we've handled this so we don't
# try and remove it from the QUEUED state while we process it
self.last_handled[key] = time.time()
def sync(self) -> None:
"""Synchronize task state."""
if self.running:
self.log.debug('self.running: %s', self.running)
if self.queued_tasks:
self.log.debug('self.queued: %s', self.queued_tasks)
if not self.scheduler_job_id:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.kube_scheduler:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.kube_config:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.event_scheduler:
raise AirflowException(NOT_STARTED_MESSAGE)
self.kube_scheduler.sync()
last_resource_version = None
while True:
try:
results = self.result_queue.get_nowait()
try:
key, state, pod_id, namespace, resource_version = results
last_resource_version = resource_version
self.log.info('Changing state of %s to %s', results, state)
try:
self._change_state(key, state, pod_id, namespace)
except Exception as e:
self.log.exception(
"Exception: %s when attempting to change state of %s to %s, re-queueing.",
e,
results,
state,
)
self.result_queue.put(results)
finally:
self.result_queue.task_done()
except Empty:
break
resource_instance = ResourceVersion()
resource_instance.resource_version = last_resource_version or resource_instance.resource_version
for _ in range(self.kube_config.worker_pods_creation_batch_size):
try:
task = self.task_queue.get_nowait()
try:
self.kube_scheduler.run_next(task)
except ApiException as e:
# These codes indicate something is wrong with pod definition; otherwise we assume pod
# definition is ok, and that retrying may work
if e.status in (400, 422):
self.log.error("Pod creation failed with reason %r. Failing task", e.reason)
key, _, _, _ = task
self.change_state(key, State.FAILED, e)
else:
self.log.warning(
'ApiException when attempting to run task, re-queueing. Reason: %r. Message: %s',
e.reason,
json.loads(e.body)['message'],
)
self.task_queue.put(task)
finally:
self.task_queue.task_done()
except Empty:
break
# Run any pending timed events
next_event = self.event_scheduler.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
def _check_worker_pods_pending_timeout(self):
"""Check if any pending worker pods have timed out"""
if not self.scheduler_job_id:
raise AirflowException(NOT_STARTED_MESSAGE)
timeout = self.kube_config.worker_pods_pending_timeout
self.log.debug('Looking for pending worker pods older than %d seconds', timeout)
kwargs = {
'limit': self.kube_config.worker_pods_pending_timeout_batch_size,
'field_selector': 'status.phase=Pending',
'label_selector': f'airflow-worker={self.scheduler_job_id}',
**self.kube_config.kube_client_request_args,
}
if self.kube_config.multi_namespace_mode:
pending_pods = functools.partial(self.kube_client.list_pod_for_all_namespaces, **kwargs)
else:
pending_pods = functools.partial(
self.kube_client.list_namespaced_pod, self.kube_config.kube_namespace, **kwargs
)
cutoff = timezone.utcnow() - timedelta(seconds=timeout)
for pod in pending_pods().items:
self.log.debug(
'Found a pending pod "%s", created "%s"', pod.metadata.name, pod.metadata.creation_timestamp
)
if pod.metadata.creation_timestamp < cutoff:
self.log.error(
(
'Pod "%s" has been pending for longer than %d seconds.'
'It will be deleted and set to failed.'
),
pod.metadata.name,
timeout,
)
self.kube_scheduler.delete_pod(pod.metadata.name, pod.metadata.namespace)
def _change_state(self, key: TaskInstanceKey, state: Optional[str], pod_id: str, namespace: str) -> None:
if state != State.RUNNING:
if self.kube_config.delete_worker_pods:
if not self.kube_scheduler:
raise AirflowException(NOT_STARTED_MESSAGE)
if state != State.FAILED or self.kube_config.delete_worker_pods_on_failure:
self.kube_scheduler.delete_pod(pod_id, namespace)
self.log.info('Deleted pod: %s in namespace %s', str(key), str(namespace))
try:
self.running.remove(key)
except KeyError:
self.log.debug('Could not find key: %s', str(key))
self.event_buffer[key] = state, None
def try_adopt_task_instances(self, tis: List[TaskInstance]) -> List[TaskInstance]:
tis_to_flush = [ti for ti in tis if not ti.queued_by_job_id]
scheduler_job_ids = {ti.queued_by_job_id for ti in tis}
pod_ids = {ti.key: ti for ti in tis if ti.queued_by_job_id}
kube_client: client.CoreV1Api = self.kube_client
for scheduler_job_id in scheduler_job_ids:
scheduler_job_id = pod_generator.make_safe_label_value(str(scheduler_job_id))
kwargs = {'label_selector': f'airflow-worker={scheduler_job_id}'}
pod_list = kube_client.list_namespaced_pod(namespace=self.kube_config.kube_namespace, **kwargs)
for pod in pod_list.items:
self.adopt_launched_task(kube_client, pod, pod_ids)
self._adopt_completed_pods(kube_client)
tis_to_flush.extend(pod_ids.values())
return tis_to_flush
def adopt_launched_task(
self, kube_client: client.CoreV1Api, pod: k8s.V1Pod, pod_ids: Dict[TaskInstanceKey, k8s.V1Pod]
) -> None:
"""
Patch existing pod so that the current KubernetesJobWatcher can monitor it via label selectors
:param kube_client: kubernetes client for speaking to kube API
:param pod: V1Pod spec that we will patch with new label
:param pod_ids: pod_ids we expect to patch.
"""
if not self.scheduler_job_id:
raise AirflowException(NOT_STARTED_MESSAGE)
self.log.info("attempting to adopt pod %s", pod.metadata.name)
pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(self.scheduler_job_id)
pod_id = annotations_to_key(pod.metadata.annotations)
if pod_id not in pod_ids:
self.log.error("attempting to adopt taskinstance which was not specified by database: %s", pod_id)
return
try:
kube_client.patch_namespaced_pod(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
body=PodGenerator.serialize_pod(pod),
)
pod_ids.pop(pod_id)
self.running.add(pod_id)
except ApiException as e:
self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)
def _adopt_completed_pods(self, kube_client: client.CoreV1Api) -> None:
"""
Patch completed pod so that the KubernetesJobWatcher can delete it.
:param kube_client: kubernetes client for speaking to kube API
"""
if not self.scheduler_job_id:
raise AirflowException(NOT_STARTED_MESSAGE)
kwargs = {
'field_selector': "status.phase=Succeeded",
'label_selector': 'kubernetes_executor=True',
}
pod_list = kube_client.list_namespaced_pod(namespace=self.kube_config.kube_namespace, **kwargs)
for pod in pod_list.items:
self.log.info("Attempting to adopt pod %s", pod.metadata.name)
pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(self.scheduler_job_id)
try:
kube_client.patch_namespaced_pod(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
body=PodGenerator.serialize_pod(pod),
)
except ApiException as e:
self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)
def _flush_task_queue(self) -> None:
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.log.debug('Executor shutting down, task_queue approximate size=%d', self.task_queue.qsize())
while True:
try:
task = self.task_queue.get_nowait()
# This is a new task to run thus ok to ignore.
self.log.warning('Executor shutting down, will NOT run task=%s', task)
self.task_queue.task_done()
except Empty:
break
def _flush_result_queue(self) -> None:
if not self.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.log.debug('Executor shutting down, result_queue approximate size=%d', self.result_queue.qsize())
while True:
try:
results = self.result_queue.get_nowait()
self.log.warning('Executor shutting down, flushing results=%s', results)
try:
key, state, pod_id, namespace, resource_version = results
self.log.info(
                        'Changing state of %s to %s : resource_version=%s', results, state, resource_version
)
try:
self._change_state(key, state, pod_id, namespace)
except Exception as e:
self.log.exception(
'Ignoring exception: %s when attempting to change state of %s to %s.',
e,
results,
state,
)
finally:
self.result_queue.task_done()
except Empty:
break
def end(self) -> None:
"""Called when the executor shuts down"""
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.kube_scheduler:
raise AirflowException(NOT_STARTED_MESSAGE)
self.log.info('Shutting down Kubernetes executor')
self.log.debug('Flushing task_queue...')
self._flush_task_queue()
self.log.debug('Flushing result_queue...')
self._flush_result_queue()
# Both queues should be empty...
self.task_queue.join()
self.result_queue.join()
if self.kube_scheduler:
self.kube_scheduler.terminate()
self._manager.shutdown()
def terminate(self):
"""Terminate the executor is not doing anything."""
|
|
"""Data iterators for common data formats."""
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
import sys
import ctypes
import logging
import threading
import numpy as np
from .base import _LIB
from .base import c_array, c_str, mx_uint, py_str
from .base import DataIterHandle, NDArrayHandle
from .base import mx_real_t
from .base import check_call, build_param_doc as _build_param_doc
from .ndarray import NDArray
from .ndarray import array
from .ndarray import concatenate
class DataDesc(namedtuple('DataDesc', ['name', 'shape'])):
"""DataDesc is used to store name, shape, type and layout
information of the data or the label.
The `layout` describes how the axes in `shape` should be interpreted,
    for example, for image data, setting `layout=NCHW` indicates
    that the first axis is the number of examples in the batch (N),
    C is the number of channels, H is the height and W is the width of the image.
For sequential data, by default `layout` is set to ``NTC``, where
N is number of examples in the batch, T the temporal axis representing time
and C is the number of channels.
Parameters
----------
cls : DataDesc
The class.
name : str
Data name.
shape : tuple of int
Data shape.
dtype : np.dtype, optional
Data type.
layout : str, optional
Data layout.
"""
def __new__(cls, name, shape, dtype=mx_real_t, layout='NCHW'):
        ret = super(DataDesc, cls).__new__(cls, name, shape)
ret.dtype = dtype
ret.layout = layout
return ret
def __repr__(self):
return "DataDesc[%s,%s,%s,%s]" % (self.name, self.shape, self.dtype,
self.layout)
@staticmethod
def get_batch_axis(layout):
"""Get the dimension that corresponds to the batch size.
When data parallelism is used, the data will be automatically split and
concatenated along the batch-size dimension. Axis can be -1, which means
the whole array will be copied for each data-parallelism device.
Parameters
----------
layout : str
layout string. For example, "NCHW".
Returns
-------
int
An axis indicating the batch_size dimension.
"""
if layout is None:
return 0
return layout.find('N')
@staticmethod
def get_list(shapes, types):
"""Get DataDesc list from attribute lists.
Parameters
----------
shapes : a tuple of (name, shape)
types : a tuple of (name, type)
"""
if types is not None:
type_dict = dict(types)
return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
else:
return [DataDesc(x[0], x[1]) for x in shapes]
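# Hedged illustration (not part of the original module): the batch axis is simply
# the position of 'N' in the layout string.
def _example_batch_axis():
    assert DataDesc.get_batch_axis('NCHW') == 0
    assert DataDesc.get_batch_axis('TNC') == 1
    assert DataDesc.get_batch_axis(None) == 0  # None defaults to the first axis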
class DataBatch(object):
"""A data batch.
MXNet's data iterator returns a batch of data for each `next` call.
This data contains `batch_size` number of examples.
If the input data consists of images, then shape of these images depend on
the `layout` attribute of `DataDesc` object in `provide_data` parameter.
    If `layout` is set to 'NCHW', then images should be stored in a 4-D matrix
    of shape ``(batch_size, num_channel, height, width)``.
    If `layout` is set to 'NHWC', then images should be stored in a 4-D matrix
of shape ``(batch_size, height, width, num_channel)``.
The channels are often in RGB order.
Parameters
----------
data : list of `NDArray`, each array containing `batch_size` examples.
A list of input data.
    label : list of `NDArray`, optional
        A list of input labels, each array often containing a 1-dimensional array.
pad : int, optional
The number of examples padded at the end of a batch. It is used when the
total number of examples read is not divisible by the `batch_size`.
These extra padded examples are ignored in prediction.
index : numpy.array, optional
The example indices in this batch.
bucket_key : int, optional
The bucket key, used for bucketing module.
provide_data : list of `DataDesc`, optional
A list of `DataDesc` objects. `DataDesc` is used to store
name, shape, type and layout information of the data.
The *i*-th element describes the name and shape of ``data[i]``.
provide_label : list of `DataDesc`, optional
A list of `DataDesc` objects. `DataDesc` is used to store
name, shape, type and layout information of the label.
The *i*-th element describes the name and shape of ``label[i]``.
"""
def __init__(self, data, label=None, pad=None, index=None,
bucket_key=None, provide_data=None, provide_label=None):
if data is not None:
assert isinstance(data, (list, tuple)), "Data must be list of NDArrays"
if label is not None:
assert isinstance(label, (list, tuple)), "Label must be list of NDArrays"
self.data = data
self.label = label
self.pad = pad
self.index = index
self.bucket_key = bucket_key
self.provide_data = provide_data
self.provide_label = provide_label
def __str__(self):
data_shapes = [d.shape for d in self.data]
label_shapes = [l.shape for l in self.label]
return "{}: data shapes: {} label shapes: {}".format(
self.__class__.__name__,
data_shapes,
label_shapes)
class DataIter(object):
"""The base class for an MXNet data iterator.
All I/O in MXNet is handled by specializations of this class. Data iterators
    in MXNet are similar to standard iterators in Python. On each call to `next`
they return a `DataBatch` which represents the next batch of data. When
there is no more data to return, it raises a `StopIteration` exception.
Parameters
----------
batch_size : int, optional
The batch size, namely the number of items in the batch.
See Also
--------
NDArrayIter : Data-iterator for MXNet NDArray or numpy-ndarray objects.
CSVIter : Data-iterator for csv data.
ImageIter : Data-iterator for images.
"""
def __init__(self, batch_size=0):
self.batch_size = batch_size
def __iter__(self):
return self
def reset(self):
"""Reset the iterator to the begin of the data."""
pass
def next(self):
"""Get next data batch from iterator.
Returns
-------
DataBatch
The data of next batch.
Raises
------
StopIteration
If the end of the data is reached.
"""
if self.iter_next():
return DataBatch(data=self.getdata(), label=self.getlabel(), \
pad=self.getpad(), index=self.getindex())
else:
raise StopIteration
def __next__(self):
return self.next()
def iter_next(self):
"""Move to the next batch.
Returns
-------
boolean
Whether the move is successful.
"""
pass
def getdata(self):
"""Get data of current batch.
Returns
-------
list of NDArray
The data of the current batch.
"""
pass
def getlabel(self):
"""Get label of the current batch.
Returns
-------
list of NDArray
The label of the current batch.
"""
pass
def getindex(self):
"""Get index of the current batch.
Returns
-------
index : numpy.array
The indices of examples in the current batch.
"""
return None
def getpad(self):
"""Get the number of padding examples in the current batch.
Returns
-------
int
Number of padding examples in the current batch.
"""
pass
class ResizeIter(DataIter):
"""Resize a data iterator to a given number of batches.
Parameters
----------
data_iter : DataIter
The data iterator to be resized.
size : int
The number of batches per epoch to resize to.
reset_internal : bool
Whether to reset internal iterator on ResizeIter.reset.
Examples
--------
>>> nd_iter = mx.io.NDArrayIter(mx.nd.ones((100,10)), batch_size=25)
>>> resize_iter = mx.io.ResizeIter(nd_iter, 2)
>>> for batch in resize_iter:
... print(batch.data)
[<NDArray 25x10 @cpu(0)>]
[<NDArray 25x10 @cpu(0)>]
"""
def __init__(self, data_iter, size, reset_internal=True):
super(ResizeIter, self).__init__()
self.data_iter = data_iter
self.size = size
self.reset_internal = reset_internal
self.cur = 0
self.current_batch = None
self.provide_data = data_iter.provide_data
self.provide_label = data_iter.provide_label
self.batch_size = data_iter.batch_size
if hasattr(data_iter, 'default_bucket_key'):
self.default_bucket_key = data_iter.default_bucket_key
def reset(self):
self.cur = 0
if self.reset_internal:
self.data_iter.reset()
def iter_next(self):
if self.cur == self.size:
return False
try:
self.current_batch = self.data_iter.next()
except StopIteration:
self.data_iter.reset()
self.current_batch = self.data_iter.next()
self.cur += 1
return True
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
class PrefetchingIter(DataIter):
"""Performs pre-fetch for other data iterators.
This iterator will create another thread to perform ``iter_next`` and then
store the data in memory. It potentially accelerates the data read, at the
cost of more memory usage.
Parameters
----------
iters : DataIter or list of DataIter
The data iterators to be pre-fetched.
rename_data : None or list of dict
The *i*-th element is a renaming map for the *i*-th iter, in the form of
{'original_name' : 'new_name'}. Should have one entry for each entry
in iter[i].provide_data.
rename_label : None or list of dict
Similar to ``rename_data``.
Examples
--------
>>> iter1 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
>>> iter2 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
>>> piter = mx.io.PrefetchingIter([iter1, iter2],
... rename_data=[{'data': 'data_1'}, {'data': 'data_2'}])
>>> print(piter.provide_data)
[DataDesc[data_1,(25, 10L),<type 'numpy.float32'>,NCHW],
DataDesc[data_2,(25, 10L),<type 'numpy.float32'>,NCHW]]
"""
def __init__(self, iters, rename_data=None, rename_label=None):
super(PrefetchingIter, self).__init__()
if not isinstance(iters, list):
iters = [iters]
self.n_iter = len(iters)
assert self.n_iter > 0
self.iters = iters
self.rename_data = rename_data
self.rename_label = rename_label
self.batch_size = self.provide_data[0][1][0]
self.data_ready = [threading.Event() for i in range(self.n_iter)]
self.data_taken = [threading.Event() for i in range(self.n_iter)]
for i in self.data_taken:
i.set()
self.started = True
self.current_batch = [None for i in range(self.n_iter)]
self.next_batch = [None for i in range(self.n_iter)]
def prefetch_func(self, i):
"""Thread entry"""
while True:
self.data_taken[i].wait()
if not self.started:
break
try:
self.next_batch[i] = self.iters[i].next()
except StopIteration:
self.next_batch[i] = None
self.data_taken[i].clear()
self.data_ready[i].set()
self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \
for i in range(self.n_iter)]
for thread in self.prefetch_threads:
thread.setDaemon(True)
thread.start()
def __del__(self):
self.started = False
for i in self.data_taken:
i.set()
for thread in self.prefetch_threads:
thread.join()
@property
def provide_data(self):
if self.rename_data is None:
return sum([i.provide_data for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_data
] for r, i in zip(self.rename_data, self.iters)], [])
@property
def provide_label(self):
if self.rename_label is None:
return sum([i.provide_label for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_label
] for r, i in zip(self.rename_label, self.iters)], [])
def reset(self):
for i in self.data_ready:
i.wait()
for i in self.iters:
i.reset()
for i in self.data_ready:
i.clear()
for i in self.data_taken:
i.set()
def iter_next(self):
for i in self.data_ready:
i.wait()
if self.next_batch[0] is None:
for i in self.next_batch:
assert i is None, "Number of entry mismatches between iterators"
return False
else:
for batch in self.next_batch:
assert batch.pad == self.next_batch[0].pad, \
"Number of entry mismatches between iterators"
self.current_batch = DataBatch(sum([batch.data for batch in self.next_batch], []),
sum([batch.label for batch in self.next_batch], []),
self.next_batch[0].pad,
self.next_batch[0].index,
provide_data=self.provide_data,
provide_label=self.provide_label)
for i in self.data_ready:
i.clear()
for i in self.data_taken:
i.set()
return True
def next(self):
if self.iter_next():
return self.current_batch
else:
raise StopIteration
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
def _init_data(data, allow_empty, default_name):
"""Convert data into canonical form."""
assert (data is not None) or allow_empty
if data is None:
data = []
if isinstance(data, (np.ndarray, NDArray)):
data = [data]
if isinstance(data, list):
if not allow_empty:
assert(len(data) > 0)
if len(data) == 1:
data = OrderedDict([(default_name, data[0])]) # pylint: disable=redefined-variable-type
else:
data = OrderedDict( # pylint: disable=redefined-variable-type
[('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)])
if not isinstance(data, dict):
raise TypeError("Input must be NDArray, numpy.ndarray, " + \
"a list of them or dict with them as values")
for k, v in data.items():
if not isinstance(v, NDArray):
try:
data[k] = array(v)
except:
raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + \
"should be NDArray or numpy.ndarray")
return list(data.items())
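# Hedged illustration (not part of the original module): _init_data normalizes
# every accepted input form into a list of (name, NDArray) pairs, e.g. a single
# 4x2 numpy array becomes [('data', <NDArray 4x2 @cpu(0)>)].
def _example_init_data():
    return _init_data(np.zeros((4, 2)), allow_empty=False, default_name='data')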
class NDArrayIter(DataIter):
"""Returns an iterator for ``mx.nd.NDArray`` or ``numpy.ndarray``.
Example usage:
----------
>>> data = np.arange(40).reshape((10,2,2))
>>> labels = np.ones([10, 1])
>>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
>>> for batch in dataiter:
... print batch.data[0].asnumpy()
... batch.data[0].shape
...
[[[ 36. 37.]
[ 38. 39.]]
[[ 16. 17.]
[ 18. 19.]]
[[ 12. 13.]
[ 14. 15.]]]
(3L, 2L, 2L)
[[[ 32. 33.]
[ 34. 35.]]
[[ 4. 5.]
[ 6. 7.]]
[[ 24. 25.]
[ 26. 27.]]]
(3L, 2L, 2L)
[[[ 8. 9.]
[ 10. 11.]]
[[ 20. 21.]
[ 22. 23.]]
[[ 28. 29.]
[ 30. 31.]]]
(3L, 2L, 2L)
>>> dataiter.provide_data # Returns a list of `DataDesc`
[DataDesc[data,(3, 2L, 2L),<type 'numpy.float32'>,NCHW]]
>>> dataiter.provide_label # Returns a list of `DataDesc`
[DataDesc[softmax_label,(3, 1L),<type 'numpy.float32'>,NCHW]]
In the above example, data is shuffled as `shuffle` parameter is set to `True`
and remaining examples are discarded as `last_batch_handle` parameter is set to `discard`.
Usage of `last_batch_handle` parameter:
>>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='pad')
>>> batchidx = 0
>>> for batch in dataiter:
... batchidx += 1
...
>>> batchidx # Padding added after the examples read are over. So, 10/3+1 batches are created.
4
>>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
>>> batchidx = 0
>>> for batch in dataiter:
... batchidx += 1
...
>>> batchidx # Remaining examples are discarded. So, 10/3 batches are created.
3
`NDArrayIter` also supports multiple input and labels.
>>> data = {'data1':np.zeros(shape=(10,2,2)), 'data2':np.zeros(shape=(20,2,2))}
>>> label = {'label1':np.zeros(shape=(10,1)), 'label2':np.zeros(shape=(20,1))}
>>> dataiter = mx.io.NDArrayIter(data, label, 3, True, last_batch_handle='discard')
Parameters
----------
data: array or list of array or dict of string to array
The input data.
label: array or list of array or dict of string to array, optional
The input label.
batch_size: int
Batch size of data.
shuffle: bool, optional
Whether to shuffle the data.
last_batch_handle : str, optional
How to handle the last batch. This parameter can be 'pad', 'discard' or
'roll_over'. 'roll_over' is intended for training and can cause problems
if used for prediction.
data_name : str, optional
The data name.
label_name : str, optional
The label name.
"""
def __init__(self, data, label=None, batch_size=1, shuffle=False,
last_batch_handle='pad', data_name='data',
label_name='softmax_label'):
super(NDArrayIter, self).__init__(batch_size)
self.data = _init_data(data, allow_empty=False, default_name=data_name)
self.label = _init_data(label, allow_empty=True, default_name=label_name)
# shuffle data
if shuffle:
idx = np.arange(self.data[0][1].shape[0])
np.random.shuffle(idx)
self.data = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.data]
self.label = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.label]
# batching
if last_batch_handle == 'discard':
new_n = self.data[0][1].shape[0] - self.data[0][1].shape[0] % batch_size
data_dict = OrderedDict(self.data)
label_dict = OrderedDict(self.label)
for k, _ in self.data:
data_dict[k] = data_dict[k][:new_n]
for k, _ in self.label:
label_dict[k] = label_dict[k][:new_n]
self.data = data_dict.items()
self.label = label_dict.items()
self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
self.num_source = len(self.data_list)
self.num_data = self.data_list[0].shape[0]
assert self.num_data >= batch_size, \
"batch_size need to be smaller than data size."
self.cursor = -batch_size
self.batch_size = batch_size
self.last_batch_handle = last_batch_handle
@property
def provide_data(self):
"""The name and shape of data provided by this iterator."""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.data
]
@property
def provide_label(self):
"""The name and shape of label provided by this iterator."""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.label
]
def hard_reset(self):
"""Ignore roll over data and set to start."""
self.cursor = -self.batch_size
def reset(self):
if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
self.cursor = -self.batch_size + (self.cursor%self.num_data)%self.batch_size
else:
self.cursor = -self.batch_size
def iter_next(self):
self.cursor += self.batch_size
return self.cursor < self.num_data
def next(self):
if self.iter_next():
return DataBatch(data=self.getdata(), label=self.getlabel(), \
pad=self.getpad(), index=None)
else:
raise StopIteration
def _getdata(self, data_source):
"""Load data from underlying arrays, internal use only."""
assert(self.cursor < self.num_data), "DataIter needs reset."
if self.cursor + self.batch_size <= self.num_data:
return [x[1][self.cursor:self.cursor+self.batch_size] for x in data_source]
else:
pad = self.batch_size - self.num_data + self.cursor
return [concatenate([x[1][self.cursor:], x[1][:pad]]) for x in data_source]
def getdata(self):
return self._getdata(self.data)
def getlabel(self):
return self._getdata(self.label)
def getpad(self):
if self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data
else:
return 0
class MXDataIter(DataIter):
"""A python wrapper a C++ data iterator.
This iterator is the Python wrapper to all native C++ data iterators, such
as `CSVIter, `ImageRecordIter`, `MNISTIter`, etc. When initializing
`CSVIter` for example, you will get an `MXDataIter` instance to use in your
Python code. Calls to `next`, `reset`, etc will be delegated to the
underlying C++ data iterators.
Usually you don't need to interact with `MXDataIter` directly unless you are
implementing your own data iterators in C++. To do that, please refer to
examples under the `src/io` folder.
Parameters
----------
handle : DataIterHandle, required
The handle to the underlying C++ Data Iterator.
data_name : str, optional
Data name. Default to "data".
label_name : str, optional
Label name. Default to "softmax_label".
See Also
--------
src/io : The underlying C++ data iterator implementation, e.g., `CSVIter`.
"""
def __init__(self, handle, data_name='data', label_name='softmax_label', **_):
super(MXDataIter, self).__init__()
self.handle = handle
# debug option, used to test the speed with io effect eliminated
self._debug_skip_load = False
# load the first batch to get shape information
self.first_batch = None
self.first_batch = self.next()
data = self.first_batch.data[0]
label = self.first_batch.label[0]
# properties
self.provide_data = [DataDesc(data_name, data.shape, data.dtype)]
self.provide_label = [DataDesc(label_name, label.shape, label.dtype)]
self.batch_size = data.shape[0]
def __del__(self):
check_call(_LIB.MXDataIterFree(self.handle))
def debug_skip_load(self):
        # Set the iterator to always return the first batch. This can be used
        # to test the speed of the network without taking the loading delay into
# account.
self._debug_skip_load = True
logging.info('Set debug_skip_load to be true, will simply return first batch')
def reset(self):
self._debug_at_begin = True
self.first_batch = None
check_call(_LIB.MXDataIterBeforeFirst(self.handle))
def next(self):
if self._debug_skip_load and not self._debug_at_begin:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
if self.first_batch is not None:
batch = self.first_batch
self.first_batch = None
return batch
self._debug_at_begin = False
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
if next_res.value:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
else:
raise StopIteration
def iter_next(self):
if self.first_batch is not None:
return True
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
return next_res.value
def getdata(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetData(self.handle, ctypes.byref(hdl)))
return NDArray(hdl, False)
def getlabel(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetLabel(self.handle, ctypes.byref(hdl)))
return NDArray(hdl, False)
def getindex(self):
index_size = ctypes.c_uint64(0)
index_data = ctypes.POINTER(ctypes.c_uint64)()
check_call(_LIB.MXDataIterGetIndex(self.handle,
ctypes.byref(index_data),
ctypes.byref(index_size)))
address = ctypes.addressof(index_data.contents)
dbuffer = (ctypes.c_uint64* index_size.value).from_address(address)
np_index = np.frombuffer(dbuffer, dtype=np.uint64)
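# Copy out of the ctypes-backed view: the buffer belongs to the underlying C++
# iterator and may be reused or freed on the next call, so return an owned array.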
return np_index.copy()
def getpad(self):
pad = ctypes.c_int(0)
check_call(_LIB.MXDataIterGetPadNum(self.handle, ctypes.byref(pad)))
return pad.value
def _make_io_iterator(handle):
"""Create an io iterator by handle."""
name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXDataIterGetIterInfo( \
handle, ctypes.byref(name), ctypes.byref(desc), \
ctypes.byref(num_args), \
ctypes.byref(arg_names), \
ctypes.byref(arg_types), \
ctypes.byref(arg_descs)))
iter_name = py_str(name.value)
narg = int(num_args.value)
param_str = _build_param_doc(
[py_str(arg_names[i]) for i in range(narg)],
[py_str(arg_types[i]) for i in range(narg)],
[py_str(arg_descs[i]) for i in range(narg)])
doc_str = ('%s\n\n' +
'%s\n' +
'Returns\n' +
'-------\n' +
'MXDataIter\n'+
' The result iterator.')
doc_str = doc_str % (desc.value, param_str)
def creator(*args, **kwargs):
"""Create an iterator.
The parameters listed below can be passed in as keyword arguments.
Parameters
----------
name : string, required.
Name of the resulting data iterator.
Returns
-------
dataiter: Dataiter
The resulting data iterator.
"""
param_keys = []
param_vals = []
for k, val in kwargs.items():
param_keys.append(c_str(k))
param_vals.append(c_str(str(val)))
# create atomic symbol
param_keys = c_array(ctypes.c_char_p, param_keys)
param_vals = c_array(ctypes.c_char_p, param_vals)
iter_handle = DataIterHandle()
check_call(_LIB.MXDataIterCreateIter(
handle,
mx_uint(len(param_keys)),
param_keys, param_vals,
ctypes.byref(iter_handle)))
if len(args):
raise TypeError('%s can only accept keyword arguments' % iter_name)
return MXDataIter(iter_handle, **kwargs)
creator.__name__ = iter_name
creator.__doc__ = doc_str
return creator
def _init_io_module():
"""List and add all the data iterators to current module."""
plist = ctypes.POINTER(ctypes.c_void_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist)))
module_obj = sys.modules[__name__]
for i in range(size.value):
hdl = ctypes.c_void_p(plist[i])
dataiter = _make_io_iterator(hdl)
setattr(module_obj, dataiter.__name__, dataiter)
_init_io_module()
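# Illustrative usage sketch (not executed here): iterators registered by
# `_init_io_module` become attributes of this module and accept keyword
# arguments only. The CSVIter parameter names below are typical examples
# rather than something this file guarantees.
def _example_registered_iter_usage():
    data_iter = CSVIter(data_csv='data.csv', data_shape=(3,), batch_size=30)
    data_iter.reset()
    batch = data_iter.next()  # a DataBatch carrying data, label, pad and index
    print(batch.data[0].shape, batch.pad)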
|
|
# Necessary set-up.
from app import db
from app.models import *
from app.utils import *
import datetime
import time
# Drop all database tables.
db.drop_all()
# Create new database tables.
db.create_all()
# Create colored pins.
cp_yellow = ColoredPin(
color_name="Yellow",
hex_color="fdd800",
pin_image_name="mbb_yellow.png"
)
db.session.add(cp_yellow)
cp_green = ColoredPin(
color_name="Green",
hex_color="009933",
pin_image_name="mbb_green.png"
)
db.session.add(cp_green)
cp_blue = ColoredPin(
color_name="Blue",
hex_color="0f85c7",
pin_image_name="mbb_blue.png"
)
db.session.add(cp_blue)
cp_red = ColoredPin(
color_name="Red",
hex_color="ef3d23",
pin_image_name="mbb_red.png"
)
db.session.add(cp_red)
cp_orange = ColoredPin(
color_name="Orange",
hex_color="f8a11d",
pin_image_name="mbb_orange.png"
)
db.session.add(cp_orange)
cp_purple = ColoredPin(
color_name="Purple",
hex_color="84459b",
pin_image_name="mbb_purple.png"
)
db.session.add(cp_purple)
cp_aqua = ColoredPin(
color_name="Aqua",
hex_color="82D1DA",
pin_image_name="mbb_aqua.png"
)
db.session.add(cp_aqua)
cp_grey = ColoredPin(
color_name="Grey",
hex_color="CCCCCC",
pin_image_name="mbb_grey.png"
)
db.session.add(cp_grey)
cp_lavender = ColoredPin(
color_name="Lavender",
hex_color="6F6AB0",
pin_image_name="mbb_lavender.png"
)
db.session.add(cp_lavender)
cp_light_green = ColoredPin(
color_name="Light Green",
hex_color="BFD849",
pin_image_name="mbb_light_green.png"
)
db.session.add(cp_light_green)
cp_magenta = ColoredPin(
color_name="Magenta",
hex_color="B86CAC",
pin_image_name="mbb_magenta.png"
)
db.session.add(cp_magenta)
cp_maroon = ColoredPin(
color_name="Maroon",
hex_color="8B181B",
pin_image_name="mbb_maroon.png"
)
db.session.add(cp_maroon)
cp_navy = ColoredPin(
color_name="Navy",
hex_color="2D4FA0",
pin_image_name="mbb_navy.png"
)
db.session.add(cp_navy)
cp_periwinkle = ColoredPin(
color_name="Periwinkle",
hex_color="A099CB",
pin_image_name="mbb_periwinkle.png"
)
db.session.add(cp_periwinkle)
cp_pink = ColoredPin(
color_name="Pink",
hex_color="F69799",
pin_image_name="mbb_pink.png"
)
db.session.add(cp_pink)
cp_sky_blue = ColoredPin(
color_name="Sky Blue",
hex_color="9FC9EB",
pin_image_name="mbb_sky_blue.png"
)
db.session.add(cp_sky_blue)
cp_turquoise = ColoredPin(
color_name="Turquoise",
hex_color="039B81",
pin_image_name="mbb_turquoise.png"
)
db.session.add(cp_turquoise)
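# Illustrative alternative (defined but not used): the pin block above could be
# written data-driven. The explicit form is kept because the food resource
# types below refer to individual pins (cp_yellow, cp_green, ...) by name.
def _add_pins_data_driven(session, pin_specs):
    # pin_specs: iterable of (color_name, hex_color, pin_image_name) tuples,
    # e.g. [("Yellow", "fdd800", "mbb_yellow.png"), ...].
    for color_name, hex_color, image_name in pin_specs:
        session.add(ColoredPin(color_name=color_name, hex_color=hex_color,
                               pin_image_name=image_name))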
# Create food resource types.
frt_farmers_market = FoodResourceType(
name_singular="Farmers' Market",
name_plural="Farmers' Markets",
colored_pin=cp_yellow)
frt_food_cupboard = FoodResourceType(
name_singular="Food Cupboard",
name_plural="Food Cupboards",
colored_pin=cp_green)
frt_senior_meals = FoodResourceType(
name_singular="Senior Meals",
name_plural="Senior Meals",
colored_pin=cp_blue)
frt_share_host_site = FoodResourceType(
name_singular="SHARE Host Site",
name_plural="SHARE Host Sites",
colored_pin=cp_red)
frt_soup_kitchen = FoodResourceType(
name_singular="Soup Kitchen",
name_plural="Soup Kitchens",
colored_pin=cp_orange)
frt_wic_office = FoodResourceType(
name_singular="WIC Office",
name_plural="WIC Offices",
colored_pin=cp_purple)
db.session.add(frt_farmers_market)
db.session.add(frt_food_cupboard)
db.session.add(frt_senior_meals)
db.session.add(frt_share_host_site)
db.session.add(frt_soup_kitchen)
db.session.add(frt_wic_office)
db.session.commit()
# Create 3 admin users
u1 = User(email='ben@ben.com', password='pass123', is_enabled=True,
first_name='Ben', last_name='Sandler',
roles=[Role(name='Admin')])
u2 = User(email='steve@gmail.com', password='p@$$w0rd', is_enabled=True,
first_name='Steve',
last_name='Smith', roles=[Role(name='Admin')])
u3 = User(email='sarah@gmail.com',
password='139rjf9i#@$#R$#!#!!!48939832984893rfcnj3@#%***^%$#@#$@#',
is_enabled=True, first_name='Sarah', last_name='Smith',
roles=[Role(name='Admin')])
u1.confirm_and_enable_debug()
u2.confirm_and_enable_debug()
u3.confirm_and_enable_debug()
# Add each new object to session and commit session.
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
# Create HTML objects
p1 = HTML(page = 'edit-page', value = 'Administrators will put content here.')
p2 = HTML(page = 'about-page', value = 'Administrators will put content here.')
p3 = HTML(page = 'map-announcements', value = 'Administrators will put announcements here.')
p4 = HTML(page = 'wic-info-page', value = 'Administrators will put content here.')
p5 = HTML(page = 'snap-info-page', value = 'Administrators will put content here.')
p6 = HTML(page = 'summer-info-page', value = 'Administrators will put content here.')
p7 = HTML(page = 'seniors-info-page', value = 'Administrators will put content here.')
p8 = HTML(page = 'contact-page', value = 'Administrators will put contact information here.')
p9 = HTML(page = 'farmers-info-page', value = 'Administrators will put content here.')
p10 = HTML(page = 'neighborhood-info-page', value = 'Administrators will put content here.')
p11 = HTML(page = 'share-info-page', value = 'Administrators will put content here.')
# Add each new object to session and commit session.
db.session.add(p1)
db.session.add(p2)
db.session.add(p3)
db.session.add(p4)
db.session.add(p5)
db.session.add(p6)
db.session.add(p7)
db.session.add(p8)
db.session.add(p9)
db.session.add(p10)
db.session.add(p11)
db.session.commit()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for :module:'heat.engine.clients.os.nova'."""
import collections
import uuid
import mock
from novaclient import client as nc
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_serialization import jsonutils as json
import six
from heat.common import exception
from heat.engine.clients.os import nova
from heat.engine import resource
from heat.tests import common
from heat.tests.nova import fakes as fakes_nova
from heat.tests import utils
class NovaClientPluginTestCase(common.HeatTestCase):
def setUp(self):
super(NovaClientPluginTestCase, self).setUp()
self.nova_client = mock.MagicMock()
con = utils.dummy_context()
c = con.clients
self.nova_plugin = c.client_plugin('nova')
self.nova_plugin._client = self.nova_client
class NovaClientPluginTests(NovaClientPluginTestCase):
"""
Basic tests for the helper methods in
:module:'heat.engine.clients.os.nova'.
"""
def test_create(self):
context = utils.dummy_context()
ext_mock = self.patchobject(nc, 'discover_extensions')
plugin = context.clients.client_plugin('nova')
client = plugin.client()
ext_mock.assert_called_once_with('2')
self.assertIsNotNone(client.servers)
def test_get_ip(self):
my_server = mock.MagicMock()
my_server.addresses = {
'public': [{'version': 4,
'addr': '4.5.6.7'},
{'version': 6,
'addr': '2401:1801:7800:0101:c058:dd33:ff18:04e6'}],
'private': [{'version': 4,
'addr': '10.13.12.13'}]}
expected = '4.5.6.7'
observed = self.nova_plugin.get_ip(my_server, 'public', 4)
self.assertEqual(expected, observed)
expected = '10.13.12.13'
observed = self.nova_plugin.get_ip(my_server, 'private', 4)
self.assertEqual(expected, observed)
expected = '2401:1801:7800:0101:c058:dd33:ff18:04e6'
observed = self.nova_plugin.get_ip(my_server, 'public', 6)
self.assertEqual(expected, observed)
def test_get_flavor_id(self):
"""Tests the get_flavor_id function."""
flav_id = str(uuid.uuid4())
flav_name = 'X-Large'
my_flavor = mock.MagicMock()
my_flavor.name = flav_name
my_flavor.id = flav_id
self.nova_client.flavors.list.return_value = [my_flavor]
self.assertEqual(flav_id, self.nova_plugin.get_flavor_id(flav_name))
self.assertEqual(flav_id, self.nova_plugin.get_flavor_id(flav_id))
self.assertRaises(exception.FlavorMissing,
self.nova_plugin.get_flavor_id, 'noflavor')
self.assertEqual(3, self.nova_client.flavors.list.call_count)
self.assertEqual([(), (), ()],
self.nova_client.flavors.list.call_args_list)
def test_get_keypair(self):
"""Tests the get_keypair function."""
my_pub_key = 'a cool public key string'
my_key_name = 'mykey'
my_key = mock.MagicMock()
my_key.public_key = my_pub_key
my_key.name = my_key_name
self.nova_client.keypairs.get.side_effect = [
my_key, nova_exceptions.NotFound(404)]
self.assertEqual(my_key, self.nova_plugin.get_keypair(my_key_name))
self.assertRaises(exception.UserKeyPairMissing,
self.nova_plugin.get_keypair, 'notakey')
calls = [mock.call(my_key_name),
mock.call('notakey')]
self.nova_client.keypairs.get.assert_has_calls(calls)
def test_get_server(self):
"""Tests the get_server function."""
my_server = mock.MagicMock()
self.nova_client.servers.get.side_effect = [
my_server, nova_exceptions.NotFound(404)]
self.assertEqual(my_server, self.nova_plugin.get_server('my_server'))
self.assertRaises(exception.EntityNotFound,
self.nova_plugin.get_server, 'idontexist')
calls = [mock.call('my_server'),
mock.call('idontexist')]
self.nova_client.servers.get.assert_has_calls(calls)
def test_get_network_id_by_label(self):
"""Tests the get_net_id_by_label function."""
net = mock.MagicMock()
net.id = str(uuid.uuid4())
self.nova_client.networks.find.side_effect = [
net, nova_exceptions.NotFound(404),
nova_exceptions.NoUniqueMatch()]
self.assertEqual(net.id,
self.nova_plugin.get_net_id_by_label('net_label'))
exc = self.assertRaises(
exception.NovaNetworkNotFound,
self.nova_plugin.get_net_id_by_label, 'idontexist')
expected = 'The Nova network (idontexist) could not be found'
self.assertIn(expected, six.text_type(exc))
exc = self.assertRaises(
exception.PhysicalResourceNameAmbiguity,
self.nova_plugin.get_net_id_by_label, 'notUnique')
expected = ('Multiple physical resources were found '
'with name (notUnique)')
self.assertIn(expected, six.text_type(exc))
calls = [mock.call(label='net_label'),
mock.call(label='idontexist'),
mock.call(label='notUnique')]
self.nova_client.networks.find.assert_has_calls(calls)
def test_get_nova_network_id(self):
"""Tests the get_nova_network_id function."""
net = mock.MagicMock()
net.id = str(uuid.uuid4())
not_existent_net_id = str(uuid.uuid4())
self.nova_client.networks.get.side_effect = [
net, nova_exceptions.NotFound(404)]
self.nova_client.networks.find.side_effect = [
nova_exceptions.NotFound(404)]
self.assertEqual(net.id,
self.nova_plugin.get_nova_network_id(net.id))
exc = self.assertRaises(
exception.NovaNetworkNotFound,
self.nova_plugin.get_nova_network_id, not_existent_net_id)
expected = ('The Nova network (%s) could not be found' %
not_existent_net_id)
self.assertIn(expected, six.text_type(exc))
calls = [mock.call(net.id),
mock.call(not_existent_net_id)]
self.nova_client.networks.get.assert_has_calls(calls)
self.nova_client.networks.find.assert_called_once_with(
label=not_existent_net_id)
def test_get_status(self):
server = self.m.CreateMockAnything()
server.status = 'ACTIVE'
observed = self.nova_plugin.get_status(server)
self.assertEqual('ACTIVE', observed)
server.status = 'ACTIVE(STATUS)'
observed = self.nova_plugin.get_status(server)
self.assertEqual('ACTIVE', observed)
class NovaClientPluginRefreshServerTests(NovaClientPluginTestCase):
msg = ("ClientException: The server has either erred or is "
"incapable of performing the requested operation.")
scenarios = [
('successful_refresh', dict(
value=None,
e_raise=False)),
('overlimit_error', dict(
value=nova_exceptions.OverLimit(413, "limit reached"),
e_raise=False)),
('500_error', dict(
value=nova_exceptions.ClientException(500, msg),
e_raise=False)),
('503_error', dict(
value=nova_exceptions.ClientException(503, msg),
e_raise=False)),
('unhandled_exception', dict(
value=nova_exceptions.ClientException(501, msg),
e_raise=True)),
]
def test_refresh(self):
server = mock.MagicMock()
server.get.side_effect = [self.value]
if self.e_raise:
self.assertRaises(nova_exceptions.ClientException,
self.nova_plugin.refresh_server, server)
else:
self.assertIsNone(self.nova_plugin.refresh_server(server))
server.get.assert_called_once_with()
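# The `scenarios` lists used by several test classes in this module follow the
# testscenarios convention (HeatTestCase is assumed to mix in
# testscenarios.WithScenarios): each (name, attrs) pair yields one run of every
# test method with the attrs copied onto the instance, roughly like this
# illustrative helper.
def _apply_scenario(test_case, attrs):
    for key, value in attrs.items():
        setattr(test_case, key, value)
    return test_case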
class NovaClientPluginFetchServerTests(NovaClientPluginTestCase):
server = mock.Mock()
# set explicitly as id and name has internal meaning in mock.Mock
server.id = '1234'
server.name = 'test_fetch_server'
msg = ("ClientException: The server has either erred or is "
"incapable of performing the requested operation.")
scenarios = [
('successful_refresh', dict(
value=server,
e_raise=False)),
('overlimit_error', dict(
value=nova_exceptions.OverLimit(413, "limit reached"),
e_raise=False)),
('500_error', dict(
value=nova_exceptions.ClientException(500, msg),
e_raise=False)),
('503_error', dict(
value=nova_exceptions.ClientException(503, msg),
e_raise=False)),
('unhandled_exception', dict(
value=nova_exceptions.ClientException(501, msg),
e_raise=True)),
]
def test_fetch_server(self):
self.nova_client.servers.get.side_effect = [self.value]
if self.e_raise:
self.assertRaises(nova_exceptions.ClientException,
self.nova_plugin.fetch_server, self.server.id)
elif isinstance(self.value, mock.Mock):
self.assertEqual(self.value,
self.nova_plugin.fetch_server(self.server.id))
else:
self.assertIsNone(self.nova_plugin.fetch_server(self.server.id))
self.nova_client.servers.get.assert_called_once_with(self.server.id)
class NovaClientPluginCheckActiveTests(NovaClientPluginTestCase):
scenarios = [
('active', dict(
status='ACTIVE',
e_raise=False)),
('deferred', dict(
status='BUILD',
e_raise=False)),
('error', dict(
status='ERROR',
e_raise=resource.ResourceInError)),
('unknown', dict(
status='VIKINGS!',
e_raise=resource.ResourceUnknownStatus))
]
def setUp(self):
super(NovaClientPluginCheckActiveTests, self).setUp()
self.server = mock.Mock()
self.server.id = '1234'
self.server.status = self.status
self.r_mock = self.patchobject(self.nova_plugin, 'refresh_server',
return_value=None)
self.f_mock = self.patchobject(self.nova_plugin, 'fetch_server',
return_value=self.server)
def test_check_active_with_object(self):
if self.e_raise:
self.assertRaises(self.e_raise,
self.nova_plugin._check_active, self.server)
self.r_mock.assert_called_once_with(self.server)
elif self.status in self.nova_plugin.deferred_server_statuses:
self.assertFalse(self.nova_plugin._check_active(self.server))
self.r_mock.assert_called_once_with(self.server)
else:
self.assertTrue(self.nova_plugin._check_active(self.server))
self.assertEqual(0, self.r_mock.call_count)
self.assertEqual(0, self.f_mock.call_count)
def test_check_active_with_string(self):
if self.e_raise:
self.assertRaises(self.e_raise,
self.nova_plugin._check_active, self.server.id)
elif self.status in self.nova_plugin.deferred_server_statuses:
self.assertFalse(self.nova_plugin._check_active(self.server.id))
else:
self.assertTrue(self.nova_plugin._check_active(self.server.id))
self.f_mock.assert_called_once_with(self.server.id)
self.assertEqual(0, self.r_mock.call_count)
def test_check_active_with_string_unavailable(self):
self.f_mock.return_value = None
self.assertFalse(self.nova_plugin._check_active(self.server.id))
self.f_mock.assert_called_once_with(self.server.id)
self.assertEqual(0, self.r_mock.call_count)
class NovaClientPluginUserdataTests(NovaClientPluginTestCase):
def test_build_userdata(self):
"""Tests the build_userdata function."""
cfg.CONF.set_override('heat_metadata_server_url',
'http://server.test:123')
cfg.CONF.set_override('heat_watch_server_url',
'http://server.test:345')
cfg.CONF.set_override('instance_connection_is_secure',
False)
cfg.CONF.set_override(
'instance_connection_https_validate_certificates', False)
data = self.nova_plugin.build_userdata({})
self.assertIn("Content-Type: text/cloud-config;", data)
self.assertIn("Content-Type: text/cloud-boothook;", data)
self.assertIn("Content-Type: text/part-handler;", data)
self.assertIn("Content-Type: text/x-cfninitdata;", data)
self.assertIn("Content-Type: text/x-shellscript;", data)
self.assertIn("http://server.test:345", data)
self.assertIn("http://server.test:123", data)
self.assertIn("[Boto]", data)
def test_build_userdata_without_instance_user(self):
"""Don't add a custom instance user when not requested."""
cfg.CONF.set_override('heat_metadata_server_url',
'http://server.test:123')
cfg.CONF.set_override('heat_watch_server_url',
'http://server.test:345')
data = self.nova_plugin.build_userdata({}, instance_user=None)
self.assertNotIn('user: ', data)
self.assertNotIn('useradd', data)
self.assertNotIn('ec2-user', data)
def test_build_userdata_with_instance_user(self):
"""Add a custom instance user."""
cfg.CONF.set_override('heat_metadata_server_url',
'http://server.test:123')
cfg.CONF.set_override('heat_watch_server_url',
'http://server.test:345')
data = self.nova_plugin.build_userdata({}, instance_user='ec2-user')
self.assertIn('user: ', data)
self.assertIn('useradd', data)
self.assertIn('ec2-user', data)
class NovaClientPluginMetadataTests(NovaClientPluginTestCase):
def test_serialize_string(self):
original = {'test_key': 'simple string value'}
self.assertEqual(original, self.nova_plugin.meta_serialize(original))
def test_serialize_int(self):
original = {'test_key': 123}
expected = {'test_key': '123'}
self.assertEqual(expected, self.nova_plugin.meta_serialize(original))
def test_serialize_list(self):
original = {'test_key': [1, 2, 3]}
expected = {'test_key': '[1, 2, 3]'}
self.assertEqual(expected, self.nova_plugin.meta_serialize(original))
def test_serialize_dict(self):
original = collections.OrderedDict([
('test_key', collections.OrderedDict([
('a', 'b'),
('c', 'd'),
]))
])
expected = {'test_key': '{"a": "b", "c": "d"}'}
actual = self.nova_plugin.meta_serialize(original)
self.assertEqual(json.loads(expected['test_key']),
json.loads(actual['test_key']))
def test_serialize_none(self):
original = {'test_key': None}
expected = {'test_key': 'null'}
self.assertEqual(expected, self.nova_plugin.meta_serialize(original))
def test_serialize_no_value(self):
"""This test is to prove that the user can only pass in a dict to nova
metadata.
"""
excp = self.assertRaises(exception.StackValidationFailed,
self.nova_plugin.meta_serialize, "foo")
self.assertIn('metadata needs to be a Map', six.text_type(excp))
def test_serialize_combined(self):
original = {
'test_key_1': 123,
'test_key_2': 'a string',
'test_key_3': {'a': 'b'},
'test_key_4': None,
}
expected = {
'test_key_1': '123',
'test_key_2': 'a string',
'test_key_3': '{"a": "b"}',
'test_key_4': 'null',
}
self.assertEqual(expected, self.nova_plugin.meta_serialize(original))
class ServerConstraintTest(common.HeatTestCase):
def setUp(self):
super(ServerConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_get_server = mock.Mock()
self.ctx.clients.client_plugin(
'nova').get_server = self.mock_get_server
self.constraint = nova.ServerConstraint()
def test_validation(self):
self.mock_get_server.return_value = mock.MagicMock()
self.assertTrue(self.constraint.validate("foo", self.ctx))
def test_validation_error(self):
self.mock_get_server.side_effect = exception.EntityNotFound(
entity='Server', name='bar')
self.assertFalse(self.constraint.validate("bar", self.ctx))
class FlavorConstraintTest(common.HeatTestCase):
def test_validate(self):
client = fakes_nova.FakeClient()
self.stub_keystoneclient()
self.patchobject(nova.NovaClientPlugin, '_create', return_value=client)
client.flavors = mock.MagicMock()
flavor = collections.namedtuple("Flavor", ["id", "name"])
flavor.id = "1234"
flavor.name = "foo"
client.flavors.list.return_value = [flavor]
constraint = nova.FlavorConstraint()
ctx = utils.dummy_context()
self.assertFalse(constraint.validate("bar", ctx))
self.assertTrue(constraint.validate("foo", ctx))
self.assertTrue(constraint.validate("1234", ctx))
nova.NovaClientPlugin._create.assert_called_once_with()
self.assertEqual(3, client.flavors.list.call_count)
self.assertEqual([(), (), ()],
client.flavors.list.call_args_list)
class NetworkConstraintTest(common.HeatTestCase):
def test_validate(self):
client = fakes_nova.FakeClient()
self.stub_keystoneclient()
self.patchobject(nova.NovaClientPlugin, '_create', return_value=client)
client.networks = mock.Mock()
network = collections.namedtuple("Network", ['id', 'label'])
network.id = '7f47ff06-0353-4013-b814-123b70b1b27d'
network.label = 'foo'
client.networks.get.return_value = network
constraint = nova.NetworkConstraint()
ctx = utils.dummy_context()
self.assertTrue(constraint.validate(network.id, ctx))
client.networks.get.side_effect = nova_exceptions.NotFound('')
client.networks.find.return_value = network
self.assertTrue(constraint.validate(network.id, ctx))
client.networks.find.side_effect = nova_exceptions.NotFound('')
self.assertFalse(constraint.validate(network.id, ctx))
client.networks.find.side_effect = nova_exceptions.NoUniqueMatch()
self.assertFalse(constraint.validate(network.id, ctx))
network.id = 'nonuuid'
client.networks.find.return_value = network
client.networks.find.side_effect = None
self.assertTrue(constraint.validate(network.id, ctx))
class KeypairConstraintTest(common.HeatTestCase):
def test_validation(self):
client = fakes_nova.FakeClient()
self.patchobject(nova.NovaClientPlugin, '_create', return_value=client)
client.keypairs = mock.MagicMock()
key = collections.namedtuple("Key", ["name"])
key.name = "foo"
client.keypairs.get.side_effect = [
fakes_nova.fake_exception(), key]
constraint = nova.KeypairConstraint()
ctx = utils.dummy_context()
self.assertFalse(constraint.validate("bar", ctx))
self.assertTrue(constraint.validate("foo", ctx))
self.assertTrue(constraint.validate("", ctx))
nova.NovaClientPlugin._create.assert_called_once_with()
calls = [mock.call('bar'),
mock.call(key.name)]
client.keypairs.get.assert_has_calls(calls)
class ConsoleUrlsTest(common.HeatTestCase):
scenarios = [
('novnc', dict(console_type='novnc', srv_method='vnc')),
('xvpvnc', dict(console_type='xvpvnc', srv_method='vnc')),
('spice', dict(console_type='spice-html5', srv_method='spice')),
('rdp', dict(console_type='rdp-html5', srv_method='rdp')),
('serial', dict(console_type='serial', srv_method='serial')),
]
def setUp(self):
super(ConsoleUrlsTest, self).setUp()
self.nova_client = mock.Mock()
con = utils.dummy_context()
c = con.clients
self.nova_plugin = c.client_plugin('nova')
self.nova_plugin._client = self.nova_client
self.server = mock.Mock()
self.console_method = getattr(self.server,
'get_%s_console' % self.srv_method)
def test_get_console_url(self):
console = {
'console': {
'type': self.console_type,
'url': '%s_console_url' % self.console_type
}
}
self.console_method.return_value = console
console_url = self.nova_plugin.get_console_urls(self.server)[
self.console_type]
self.assertEqual(console['console']['url'], console_url)
self.console_method.assert_called_once_with(self.console_type)
def test_get_console_url_tolerate_unavailable(self):
msg = 'Unavailable console type %s.' % self.console_type
self.console_method.side_effect = nova_exceptions.BadRequest(
400, message=msg)
console_url = self.nova_plugin.get_console_urls(self.server)[
self.console_type]
self.console_method.assert_called_once_with(self.console_type)
self.assertEqual(msg, console_url)
def test_get_console_urls_reraises_other_400(self):
exc = nova_exceptions.BadRequest
self.console_method.side_effect = exc(400, message="spam")
urls = self.nova_plugin.get_console_urls(self.server)
e = self.assertRaises(exc, urls.__getitem__, self.console_type)
self.assertIn('spam', e.message)
self.console_method.assert_called_once_with(self.console_type)
def test_get_console_urls_reraises_other(self):
exc = Exception
self.console_method.side_effect = exc("spam")
urls = self.nova_plugin.get_console_urls(self.server)
e = self.assertRaises(exc, urls.__getitem__, self.console_type)
self.assertIn('spam', e.args)
self.console_method.assert_called_once_with(self.console_type)
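# Note drawn from the two re-raise tests above: get_console_urls() evidently
# returns a lazily-evaluated mapping, so unexpected errors surface when the
# URL is accessed (urls[console_type]) rather than when the mapping is built.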
|
|
import logging
from collections import defaultdict
from django.utils import six
from django.utils.safestring import mark_safe
from .base import (
Node, Template, TemplateSyntaxError, TextNode, Variable, token_kwargs,
)
from .library import Library
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
logger = logging.getLogger('django.template')
class ExtendsError(Exception):
pass
class BlockContext(object):
def __init__(self):
# Dictionary of FIFO queues.
self.blocks = defaultdict(list)
def add_blocks(self, blocks):
for name, block in six.iteritems(blocks):
self.blocks[name].insert(0, block)
def pop(self, name):
try:
return self.blocks[name].pop()
except IndexError:
return None
def push(self, name, block):
self.blocks[name].append(block)
def get_block(self, name):
try:
return self.blocks[name][-1]
except IndexError:
return None
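# Illustrative sketch (not part of the module, plain strings in place of
# BlockNode objects): add_blocks() prepends, so blocks from the most-derived
# template, which are added first during rendering, stay at the back of each
# queue where get_block()/pop() find them first; a second pop() reaches the
# parent's definition, which is what {{ block.super }} relies on.
def _block_context_example():
    bc = BlockContext()
    bc.add_blocks({'content': 'child override'})   # extending template, added first
    bc.add_blocks({'content': 'parent default'})   # base template, added later
    assert bc.get_block('content') == 'child override'
    assert bc.pop('content') == 'child override'    # what BlockNode renders
    assert bc.pop('content') == 'parent default'    # what {{ block.super }} sees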
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
with context.push():
if block_context is None:
context['block'] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without thread-safety issues.
block = type(self)(block.name, block.nodelist)
block.context = context
context['block'] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
return result
def super(self):
if not hasattr(self, 'context'):
raise TemplateSyntaxError(
"'%s' object has no attribute 'context'. Did you use "
"{{ block.super }} in a base template?" % self.__class__.__name__
)
render_context = self.context.render_context
if (BLOCK_CONTEXT_KEY in render_context and
render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
return mark_safe(self.render(self.context))
return ''
class ExtendsNode(Node):
must_be_first = True
context_key = 'extends_context'
def __init__(self, nodelist, parent_name, template_dirs=None):
self.nodelist = nodelist
self.parent_name = parent_name
self.template_dirs = template_dirs
self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}
def __repr__(self):
return '<ExtendsNode: extends %s>' % self.parent_name.token
def find_template(self, template_name, context):
"""
This is a wrapper around engine.find_template(). A history is kept in
the render_context attribute between successive extends calls and
passed as the skip argument. This enables extends to work recursively
without extending the same template twice.
"""
# RemovedInDjango20Warning: If any non-recursive loaders are installed
# do a direct template lookup. If the same template name appears twice,
# raise an exception to avoid system recursion.
for loader in context.template.engine.template_loaders:
if not loader.supports_recursion:
history = context.render_context.setdefault(
self.context_key, [context.template.origin.template_name],
)
if template_name in history:
raise ExtendsError(
"Cannot extend templates recursively when using "
"non-recursive template loaders",
)
template = context.template.engine.get_template(template_name)
history.append(template_name)
return template
history = context.render_context.setdefault(
self.context_key, [context.template.origin],
)
template, origin = context.template.engine.find_template(
template_name, skip=history,
)
history.append(origin)
return template
def get_parent(self, context):
parent = self.parent_name.resolve(context)
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name.filters or\
isinstance(self.parent_name.var, Variable):
error_msg += " Got this from the '%s' variable." %\
self.parent_name.token
raise TemplateSyntaxError(error_msg)
if isinstance(parent, Template):
# parent is a django.template.Template
return parent
if isinstance(getattr(parent, 'template', None), Template):
# parent is a django.template.backends.django.Template
return parent.template
return self.find_template(parent, context)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = {n.name: n for n in
compiled_parent.nodelist.get_nodes_by_type(BlockNode)}
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
return compiled_parent._render(context)
class IncludeNode(Node):
context_key = '__include_context'
def __init__(self, template, *args, **kwargs):
self.template = template
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(IncludeNode, self).__init__(*args, **kwargs)
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
try:
template = self.template.resolve(context)
# Does this quack like a Template?
if not callable(getattr(template, 'render', None)):
# If not, we'll try our cache, and get_template()
template_name = template
cache = context.render_context.setdefault(self.context_key, {})
template = cache.get(template_name)
if template is None:
template = context.template.engine.get_template(template_name)
cache[template_name] = template
values = {
name: var.resolve(context)
for name, var in six.iteritems(self.extra_context)
}
if self.isolated_context:
return template.render(context.new(values))
with context.push(**values):
return template.render(context)
except Exception:
if context.template.engine.debug:
raise
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.warning(
"Exception raised while rendering {%% include %%} for "
"template '%s'. Empty string rendered instead.",
template_name,
exc_info=True,
)
return ''
@register.tag('block')
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments.
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock',))
# This check is kept for backwards-compatibility. See #3100.
endblock = parser.next_token()
acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
if endblock.contents not in acceptable_endblocks:
parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks)
return BlockNode(block_name, nodelist)
@register.tag('extends')
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0])
return ExtendsNode(nodelist, parent_name)
@register.tag('include')
def do_include(parser, token):
"""
Loads a template and renders it with the current context. You can pass
additional context using keyword arguments.
Example::
{% include "foo/some_include" %}
{% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
Use the ``only`` argument to exclude the current context when rendering
the included template::
{% include "foo/some_include" only %}
{% include "foo/some_include" with bar="1" only %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"%r tag takes at least one argument: the name of the template to "
"be included." % bits[0]
)
options = {}
remaining_bits = bits[2:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap,
isolated_context=isolated_context)
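# Putting the three tags together (illustrative template snippets, shown as
# comments because they are Django template language rather than Python):
#
#   {# base.html #}
#   {% block content %}parent default{% endblock %}
#
#   {# child.html #}
#   {% extends "base.html" %}
#   {% block content %}{{ block.super }} plus child text{% endblock %}
#
#   {# some other template #}
#   {% include "child.html" only %}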
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras CategoryEncoding preprocessing layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bincount_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import keras_export
TFIDF = "tf-idf"
INT = "int"
BINARY = "binary"
COUNT = "count"
# The number of elements in the extracted vocabulary
_NUM_ELEMENTS_NAME = "num_elements"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
@keras_export("keras.layers.experimental.preprocessing.CategoryEncoding", v1=[])
class CategoryEncoding(base_preprocessing_layer.CombinerPreprocessingLayer):
"""Category encoding layer.
This layer provides options for condensing data into a categorical encoding.
It accepts integer values as inputs and outputs a dense representation
(one sample = a 1-D tensor of float values representing data about the
sample's tokens) of those inputs.
Examples:
**Multi-hot encoding data if you know in advance the number of tokens**
In this case, you can pass the `max_tokens` argument to the constructor.
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... max_tokens=4, output_mode="binary")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)>
**Multi-hot encoding data where the number of tokens is unknown**
In this case, you should `adapt()` the layer on a sample dataset.
```python
layer = CategoryEncoding(output_mode="binary")
layer.adapt(sample_dataset) # Indexes the vocabulary of the data
outputs = layer(inputs)
```
**Using weighted inputs in `count` mode**
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... max_tokens=4, output_mode="count")
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
<tf.Tensor: shape=(4, 4), dtype=float64, numpy=
array([[0.1, 0.2, 0. , 0. ],
[0.2, 0. , 0. , 0. ],
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]])>
Arguments:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary.
output_mode: Specification for the output of the layer.
Defaults to "binary". Values can
be "binary", "count" or "tf-idf", configuring the layer as follows:
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
sparse: Boolean. If true, returns a `SparseTensor` instead of a dense
`Tensor`. Defaults to `False`.
Call arguments:
inputs: A 2D tensor `(samples, timesteps)`.
count_weights: A 2D tensor in the same shape as `inputs` indicating the
weight for each sample value when summing up in `count` mode. Not used in
`binary` or `tfidf` mode.
"""
def __init__(self,
max_tokens=None,
output_mode=BINARY,
sparse=False,
**kwargs):
# 'output_mode' must be one of (COUNT, BINARY, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(COUNT, BINARY, TFIDF),
layer_name="CategoryEncoding",
arg_name="output_mode")
# If max_tokens is set, the value must be at least 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens < 1:
raise ValueError("max_tokens must be > 1.")
# We need to call super() before we call _add_state_variable().
combiner = _CategoryEncodingCombiner(
max_tokens=max_tokens,
compute_idf=output_mode == TFIDF)
super(CategoryEncoding, self).__init__(combiner=combiner, **kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell(
"CategoryEncoding").set(True)
self.max_tokens = max_tokens
self.output_mode = output_mode
self.sparse = sparse
self._called = False
if self.output_mode == TFIDF:
# The TF-IDF weight may have a (None,) tensorshape. This creates
# a 1D variable with arbitrary shape, which we can assign any weight to
# so long as it has 1 dimension. In order to properly initialize this
# weight in Keras, we need to provide a custom callable initializer which
# does not depend on the shape of the weight (as all other initializers
# do) since the weight is not known. Hence the lambda shape, dtype: [0].
if max_tokens is None:
initializer = lambda shape, dtype: [0]
else:
initializer = init_ops.zeros_initializer
# We are adding these here instead of in build() since they do not depend
# on the input shape at all.
self.tf_idf_weights = self._add_state_variable(
name=_IDF_NAME,
shape=tensor_shape.TensorShape((max_tokens,)),
dtype=K.floatx(),
initializer=initializer)
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
return tensor_shape.TensorShape([input_shape[0], self.max_tokens])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = K.floatx() if self.output_mode == TFIDF else dtypes.int64
if self.sparse:
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
else:
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
Raises:
RuntimeError: if the layer cannot be adapted at this time.
"""
if not reset_state:
raise ValueError("CategoryEncoding does not support streaming adapts.")
super(CategoryEncoding, self).adapt(data, reset_state)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if _NUM_ELEMENTS_NAME in updates:
if self.max_tokens is None:
self.set_num_elements(updates[_NUM_ELEMENTS_NAME])
elif self.max_tokens != updates[_NUM_ELEMENTS_NAME]:
raise RuntimeError("Cannot update states if you construct the layer "
"with `max_tokens`={}".format(self.max_tokens))
if self.output_mode == TFIDF:
self.set_tfidf_data(updates[_IDF_NAME])
def get_config(self):
config = {
"max_tokens": self.max_tokens,
"output_mode": self.output_mode,
"sparse": self.sparse,
}
base_config = super(CategoryEncoding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _convert_to_ndarray(self, x):
if isinstance(x, ops.Tensor):
return x
else:
return np.array(x)
def _convert_to_sparse_inputs(self, inputs):
if isinstance(inputs, sparse_tensor.SparseTensor):
return inputs
elif isinstance(inputs, ragged_tensor.RaggedTensor):
return inputs.to_sparse()
else:
indices = array_ops.where_v2(
math_ops.greater_equal(inputs, array_ops.constant(0, inputs.dtype)))
values = array_ops.gather_nd(inputs, indices)
shape = array_ops.shape(inputs, out_type=dtypes.int64)
return sparse_tensor.SparseTensor(indices, values, shape)
def set_num_elements(self, num_elements):
if self.max_tokens is not None:
raise RuntimeError(
"In order to dynamically set the number of elements, the "
"layer's 'max_tokens' arg must be set to None.")
if not isinstance(num_elements, numbers.Integral):
raise ValueError("num_elements must be a scalar integer.")
if self._called:
raise RuntimeError("num_elements cannot be changed after the layer is "
"called.")
self.max_tokens = num_elements
def set_tfidf_data(self, tfidf_data):
tfidf_data = self._convert_to_ndarray(tfidf_data)
if self.output_mode != TFIDF:
raise RuntimeError(
"In order to set TF-IDF data, the output mode must be 'tf-idf'.")
if tfidf_data.ndim != 1:
raise ValueError("TF-IDF data must be a 1-index array.")
if self.max_tokens is not None:
input_data_length = tfidf_data.shape[0]
if input_data_length > self.max_tokens:
raise ValueError("The array provided has %d elements. This layer is "
"configured to only allow %d elements." %
(input_data_length, self.max_tokens))
if input_data_length < self.max_tokens:
tfidf_data = np.resize(tfidf_data, (self.max_tokens,))
K.set_value(self.tf_idf_weights, tfidf_data)
def call(self, inputs, count_weights=None):
if isinstance(inputs, (list, np.ndarray)):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
if inputs.shape.rank == 1:
inputs = array_ops.expand_dims(inputs, 1)
if count_weights is not None and self.output_mode != COUNT:
raise ValueError("count_weights is not used in `output_mode='tf-idf'`, "
"or `output_mode='binary'`. Please pass a single input.")
self._called = True
if self.max_tokens is None:
raise RuntimeError(
"If you construct a `CategoryEncoding` layer with "
"`max_tokens=None`, you need to call `adapt()` "
"on it before using it")
else:
out_depth = self.max_tokens
if self.output_mode == TFIDF:
# If the input is a sparse tensor, we densify it with the default value of
# -1. Because -1 is ignored by one_hot, this effectively drops the non-set
# positions from the output encoding.
if self.sparse:
raise ValueError("`sparse=True` with `output_mode=tfidf` "
"is not supported.")
if isinstance(inputs, sparse_tensor.SparseTensor):
inputs = sparse_ops.sparse_tensor_to_dense(inputs, default_value=-1)
one_hot_data = array_ops.one_hot(inputs, depth=out_depth)
counts = math_ops.reduce_sum(one_hot_data, axis=1)
tf_idf_data = math_ops.multiply(counts, self.tf_idf_weights)
tf_idf_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
return tf_idf_data
binary_output = (self.output_mode == BINARY)
if isinstance(inputs, sparse_tensor.SparseTensor):
max_value = math_ops.reduce_max(inputs.values)
min_value = math_ops.reduce_min(inputs.values)
else:
max_value = math_ops.reduce_max(inputs)
min_value = math_ops.reduce_min(inputs)
condition = math_ops.logical_and(
math_ops.greater(
math_ops.cast(out_depth, max_value.dtype), max_value),
math_ops.greater_equal(
min_value, math_ops.cast(0, min_value.dtype)))
control_flow_ops.Assert(
condition, ["Input values must be in the range 0 <= values < max_tokens"
" with max_tokens={}".format(out_depth)])
if self.sparse:
return sparse_bincount(inputs, out_depth, binary_output, count_weights)
else:
return dense_bincount(inputs, out_depth, binary_output, count_weights)
class _CategoryEncodingAccumulator(
collections.namedtuple("Accumulator", ["data", "per_doc_count_dict"])):
pass
class _CategoryEncodingCombiner(base_preprocessing_layer.Combiner):
"""Combiner for the CategoryEncoding preprocessing layer.
This class encapsulates the logic for computing the number of elements in the
input dataset and the document frequency for each element.
Attributes:
max_tokens: (Optional) If set, caps the vocabulary size; when unset, this
combiner tracks the maximum element seen and returns the resulting number
of elements as part of its `extract()` call.
compute_idf: (Optional) If set, the inverse document frequency will be
computed for each value.
"""
# These are indices into the accumulator's `data` array.
MAX_VALUE_IDX = 0
DOC_ID_IDX = 1
def __init__(self, max_tokens=None, compute_idf=False):
self.max_tokens = max_tokens
self._compute_idf = compute_idf
def compute(self, values, accumulator=None):
"""Computes a step in this computation, returning a new accumulator."""
values = base_preprocessing_layer.convert_to_list(values)
if accumulator is None:
accumulator = self._create_accumulator()
# TODO(momernick): Benchmark improvements to this algorithm.
for element in values:
if not isinstance(element, list):
element = [element]
current_doc_id = accumulator.data[self.DOC_ID_IDX]
for value in element:
if self.max_tokens is None:
current_max_value = accumulator.data[self.MAX_VALUE_IDX]
if value > current_max_value:
accumulator.data[self.MAX_VALUE_IDX] = value
if self._compute_idf:
doc_count = accumulator.per_doc_count_dict[value]
if doc_count["last_doc_id"] != current_doc_id:
doc_count["count"] += 1
doc_count["last_doc_id"] = current_doc_id
accumulator.data[self.DOC_ID_IDX] += 1
return accumulator
def merge(self, accumulators):
"""Merges several accumulators to a single accumulator."""
if not accumulators:
return accumulators
base_accumulator = accumulators[0]
for accumulator in accumulators[1:]:
base_accumulator.data[self.DOC_ID_IDX] += accumulator.data[
self.DOC_ID_IDX]
if self.max_tokens is None:
base_accumulator.data[self.MAX_VALUE_IDX] = max(
base_accumulator.data[self.MAX_VALUE_IDX],
accumulator.data[self.MAX_VALUE_IDX])
if self._compute_idf:
for token, value in accumulator.per_doc_count_dict.items():
# Any newly created token counts in 'base_accumulator''s
# per_doc_count_dict will have a last_doc_id of -1. This is always
# less than the next doc id (which are strictly positive), so any
# future occurrences are guaranteed to be counted.
base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
return base_accumulator
def _inverse_document_frequency(self, document_counts, num_documents):
"""Computes the inverse-document-frequency (IDF) component of TFIDF.
Uses the default weighting scheme described in
https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
Args:
document_counts: An array of the # of documents each token appears in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return np.log(1 + num_documents / (1 + np.array(document_counts)))
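# Quick numeric illustration of the weighting above: with num_documents=4 and
# document_counts=[2, 0], the token seen in 2 documents gets
# log(1 + 4 / (1 + 2)) ~= 0.847 while an unseen token gets
# log(1 + 4 / (1 + 0)) ~= 1.609, i.e. rarer tokens receive larger IDF weights.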
def extract(self, accumulator):
"""Converts an accumulator into a dict of output values.
Args:
accumulator: An accumulator aggregating over the full dataset.
Returns:
A dict of:
"num_elements": The number of unique elements in the data set. Only
returned if `compute_max_element` is True.
"idf": The inverse-document-frequency for each index, where idf[i] is
the IDF value for index i. Only returned if `compute_idf` is True.
"""
data, document_counts = accumulator
if data[self.MAX_VALUE_IDX] is not None:
max_element = data[self.MAX_VALUE_IDX] + 1
else:
max_element = self.max_tokens
output_dict = {}
if self.max_tokens is None:
output_dict[_NUM_ELEMENTS_NAME] = max_element
if self._compute_idf:
num_documents = data[self.DOC_ID_IDX]
# Here, we need to get the doc_counts for every token value, including
# values we have not yet seen (and are not in the document_counts dict).
# However, because document_counts is a defaultdict (see below), querying
# the dict directly for those values gives us meaningful counts (of 0).
# This also means we can't just extract the values in
# document_counts - we need to do a deliberate indexing using range().
doc_counts = [document_counts[i]["count"] for i in range(max_element)]
idf = self._inverse_document_frequency(doc_counts, num_documents)
output_dict[_IDF_NAME] = idf
return output_dict
def restore(self, output):
"""Creates an accumulator based on 'output'."""
raise NotImplementedError(
"CategoryEncoding does not restore or support streaming updates.")
def serialize(self, accumulator):
"""Serializes an accumulator for a remote call."""
output_dict = {}
output_dict["data"] = accumulator.data
if self._compute_idf:
output_dict["idf_vocab"] = list(accumulator.per_doc_count_dict.keys())
output_dict["idf_counts"] = [
counter["count"]
for counter in accumulator.per_doc_count_dict.values()
]
return compat.as_bytes(json.dumps(output_dict))
def deserialize(self, encoded_accumulator):
"""Deserializes an accumulator received from 'serialize()'."""
accumulator_dict = json.loads(compat.as_text(encoded_accumulator))
accumulator = self._create_accumulator()
for i, value in enumerate(accumulator_dict["data"]):
accumulator.data[i] = value
if self._compute_idf:
create_dict = lambda x: {"count": x, "last_doc_id": -1}
idf_count_dicts = [
create_dict(count) for count in accumulator_dict["idf_counts"]
]
idf_dict = dict(zip(accumulator_dict["idf_vocab"], idf_count_dicts))
accumulator.per_doc_count_dict.update(idf_dict)
return accumulator
def _create_accumulator(self):
"""Accumulates a sorted array of vocab tokens and corresponding counts."""
if self._compute_idf:
create_default_dict = lambda: {"count": 0, "last_doc_id": -1}
per_doc_count_dict = collections.defaultdict(create_default_dict)
else:
per_doc_count_dict = None
if self.max_tokens is None:
data = [0, 0]
else:
data = [None, 0]
return _CategoryEncodingAccumulator(data, per_doc_count_dict)
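# Illustrative end-to-end pass through the combiner above (the framework
# normally drives these calls from adapt(); shown here only as a sketch):
def _combiner_example():
    combiner = _CategoryEncodingCombiner(max_tokens=None, compute_idf=True)
    acc_a = combiner.compute([[0, 1], [1, 2]])  # two documents
    acc_b = combiner.compute([[3]])             # one more document
    merged = combiner.merge([acc_a, acc_b])
    # Returns {'num_elements': 4, 'idf': <array of 4 weights>} for this data.
    return combiner.extract(merged)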
def sparse_bincount(inputs, out_depth, binary_output, count_weights=None):
"""Apply binary or count encoding to an input and return a sparse tensor."""
result = bincount_ops.sparse_bincount(
inputs,
weights=count_weights,
minlength=out_depth,
maxlength=out_depth,
axis=-1,
binary_output=binary_output)
result = math_ops.cast(result, K.floatx())
batch_size = array_ops.shape(result)[0]
result = sparse_tensor.SparseTensor(
indices=result.indices,
values=result.values,
dense_shape=[batch_size, out_depth])
return result
def dense_bincount(inputs, out_depth, binary_output, count_weights=None):
"""Apply binary or count encoding to an input."""
result = bincount_ops.bincount(
inputs,
weights=count_weights,
minlength=out_depth,
maxlength=out_depth,
dtype=K.floatx(),
axis=-1,
binary_output=binary_output)
result.set_shape(tensor_shape.TensorShape((None, out_depth)))
return result
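# Illustrative check of the helper above: with inputs=[[0, 1, 1], [2, 2, 2]],
# out_depth=4 and binary_output=False, dense_bincount returns
# [[1., 2., 0., 0.], [0., 0., 3., 0.]]; with binary_output=True the same call
# yields [[1., 1., 0., 0.], [0., 0., 1., 0.]].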
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ExportIntegrationTest(PantsRunIntegrationTest):
_confs_args = [
'--export-libraries-sources',
'--export-libraries-javadocs',
]
def run_export(self, test_target, workdir, load_libs=False, extra_args=None):
export_out_file = os.path.join(workdir, 'export_out.txt')
    args = (['export', '--output-file={out_file}'.format(out_file=export_out_file)] +
            maybe_list(test_target))
libs_args = ['--no-export-libraries'] if not load_libs else self._confs_args
pants_run = self.run_pants_with_workdir(args + libs_args + (extra_args or []), workdir)
self.assert_success(pants_run)
self.assertTrue(os.path.exists(export_out_file),
msg='Could not find export output file in {out_file}'
.format(out_file=export_out_file))
with open(export_out_file) as json_file:
json_data = json.load(json_file)
if not load_libs:
self.assertIsNone(json_data.get('libraries'))
return json_data
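  # Hedged sketch of the export JSON shape exercised by the tests below,
  # inferred from this file's assertions rather than from the export task's
  # own documentation:
  #   {
  #     "targets": {"<target spec>": {"libraries": [...], "excludes": [...],
  #                                   "platform": "...", ...}},
  #     "libraries": {"org:name:rev": {"default": "<jar path>",
  #                                    "sources": "...", "javadoc": "..."}},
  #     "jvm_platforms": {...}, "jvm_distributions": {...},
  #     "python_setup": {...}  # only present when Python targets are exported
  #   }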
def test_export_code_gen(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
thrift_target_name = ('examples.src.thrift.org.pantsbuild.example.precipitation'
'.precipitation-java')
codegen_target = os.path.join(os.path.relpath(workdir, get_buildroot()),
'gen/thrift/isolated/{0}:{0}'.format(thrift_target_name))
self.assertIn(codegen_target, json_data.get('targets').keys())
def test_export_json_transitive_jar(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
targets = json_data.get('targets')
self.assertIn('org.hamcrest:hamcrest-core:1.3', targets[test_target]['libraries'])
def test_export_jar_path_with_excludes(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:foo'
json_data = self.run_export(test_target, workdir, load_libs=True)
self.assertIsNone(json_data
.get('libraries')
.get('com.typesafe.sbt:incremental-compiler:0.13.7'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
def test_export_jar_path_with_excludes_soft(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:'
json_data = self.run_export(test_target,
workdir,
load_libs=True,
extra_args=['--resolve-ivy-soft-excludes'])
self.assertIsNotNone(json_data
.get('libraries')
.get('com.martiansoftware:nailgun-server:0.9.1'))
self.assertIsNotNone(json_data.get('libraries').get('org.pantsbuild:jmake:1.3.8-10'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
self.assertTrue('org.pantsbuild' in foo_target.get('excludes'))
def test_export_jar_path(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
      # NB(Eric Ayers): Getting the cache dir from the IvySubsystem instance can be
      # difficult in a test that isn't a subclass of TaskTestBase.
# ivy_cache_dir = IvySubsystem.global_instance().get_options().cache_dir
ivy_cache_dir = os.path.expanduser('~/.ivy2/pants')
common_lang_lib_info = json_data.get('libraries').get('commons-lang:commons-lang:2.5')
self.assertIsNotNone(common_lang_lib_info)
self.assertEquals(
common_lang_lib_info.get('default'),
os.path.join(ivy_cache_dir, 'commons-lang/commons-lang/jars/commons-lang-2.5.jar')
)
self.assertEquals(
common_lang_lib_info.get('javadoc'),
os.path.join(ivy_cache_dir,
'commons-lang/commons-lang/javadocs/commons-lang-2.5-javadoc.jar')
)
self.assertEquals(
common_lang_lib_info.get('sources'),
os.path.join(ivy_cache_dir,
'commons-lang/commons-lang/sources/commons-lang-2.5-sources.jar')
)
def test_dep_map_for_java_sources(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
json_data = self.run_export(test_target, workdir)
targets = json_data.get('targets')
self.assertIn('examples/src/java/org/pantsbuild/example/java_sources:java_sources', targets)
def test_sources_and_javadocs(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
json_data = self.run_export(test_target, workdir, load_libs=True)
scala_lang_lib = json_data.get('libraries').get('org.scala-lang:scala-library:2.10.4')
self.assertIsNotNone(scala_lang_lib)
self.assertIsNotNone(scala_lang_lib['default'])
self.assertIsNotNone(scala_lang_lib['sources'])
self.assertIsNotNone(scala_lang_lib['javadoc'])
def test_ivy_classifiers(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier:ivyclassifier'
json_data = self.run_export(test_target, workdir, load_libs=True)
      # NB(Eric Ayers): Getting the cache dir from the IvySubsystem instance can be
      # difficult in a test that isn't a subclass of TaskTestBase.
# ivy_cache_dir = IvySubsystem.global_instance().get_options().cache_dir
ivy_cache_dir = os.path.expanduser('~/.ivy2/pants')
avro_lib_info = json_data.get('libraries').get('org.apache.avro:avro:1.7.7')
self.assertIsNotNone(avro_lib_info)
self.assertEquals(
avro_lib_info.get('default'),
os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7.jar')
)
self.assertEquals(
avro_lib_info.get('tests'),
os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7-tests.jar')
)
# TODO(Eric Ayers): Pants does not properly download javadoc and test jars
#self.assertEquals(
# common_lang_lib_info.get('javadoc'),
# os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7-javadoc.jar')
#)
#self.assertEquals(
# common_lang_lib_info.get('sources'),
# os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7-sources.jar')
#)
def test_distributions_and_platforms(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
test_target = 'examples/src/java/org/pantsbuild/example/hello/simple'
json_data = self.run_export(test_target, workdir, load_libs=False, extra_args=[
'--jvm-platform-default-platform=java7',
'--jvm-platform-platforms={'
' "java7": {"source": "1.7", "target": "1.7", "args": [ "-X123" ]},'
' "java8": {"source": "1.8", "target": "1.8", "args": [ "-X456" ]}'
'}',
'--jvm-distributions-paths={'
' "macos": [ "/Library/JDK" ],'
' "linux": [ "/usr/lib/jdk7", "/usr/lib/jdk8"]'
'}'
])
self.assertFalse('python_setup' in json_data)
target_name = 'examples/src/java/org/pantsbuild/example/hello/simple:simple'
targets = json_data.get('targets')
self.assertEquals('java7', targets[target_name]['platform'])
self.assertEquals(
{
'darwin': ['/Library/JDK'],
'linux': ['/usr/lib/jdk7', u'/usr/lib/jdk8'],
},
json_data['jvm_distributions'])
self.assertEquals(
{
'default_platform' : 'java7',
'platforms': {
'java7': {
'source_level': '1.7',
'args': ['-X123'],
'target_level': '1.7'},
'java8': {
'source_level': '1.8',
'args': ['-X456'],
'target_level': '1.8'},
}
},
json_data['jvm_platforms'])
def test_intellij_integration(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
targets = ['src/python/::', 'tests/python/pants_test:all', 'contrib/::']
excludes = [
'--exclude-target-regexp=.*go/examples.*',
'--exclude-target-regexp=.*scrooge/tests/thrift.*',
'--exclude-target-regexp=.*spindle/tests/thrift.*',
'--exclude-target-regexp=.*spindle/tests/jvm.*'
]
json_data = self.run_export(targets, workdir, extra_args=excludes)
python_setup = json_data['python_setup']
self.assertIsNotNone(python_setup)
self.assertIsNotNone(python_setup['interpreters'])
default_interpreter = python_setup['default_interpreter']
self.assertIsNotNone(default_interpreter)
self.assertIsNotNone(python_setup['interpreters'][default_interpreter])
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['binary']))
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['chroot']))
core_target = json_data['targets']['src/python/pants/backend/core:core']
self.assertIsNotNone(core_target)
self.assertEquals(default_interpreter, core_target['python_interpreter'])
|
|
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgprecipitation"
# !!! This program will need manual tuning before it will work. !!!
import sys
from math import sin
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgParticle
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgprecipitation.cpp'
# OpenSceneGraph example, osgprecipitation.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgDB/ReadFile>
#include <osgDB/FileUtils>
#include <osgUtil/Optimizer>
#include <osgUtil/CullVisitor>
#include <osg/ClipNode>
#include <osgGA/TrackballManipulator>
#include <osgGA/FlightManipulator>
#include <osgGA/DriveManipulator>
#include <osgGA/KeySwitchMatrixManipulator>
#include <osgGA/StateSetManipulator>
#include <osgGA/AnimationPathManipulator>
#include <osgGA/TerrainManipulator>
#include <osgViewer/Viewer>
#include <osg/MatrixTransform>
#include <osgUtil/TransformCallback>
#include <osgParticle/PrecipitationEffect>
#include <iostream>
class MyGustCallback (osg.NodeCallback) :
    def __init__(self):
        super(MyGustCallback, self).__init__()
    def __call__(self, node, nv):
        # node is expected to be the osgParticle.PrecipitationEffect this
        # callback is attached to; alternate between snow and rain over time.
        pe = node
        value = sin(nv.getFrameStamp().getSimulationTime())
        if value < -0.5 :
            pe.snow(1.0)
        else:
            pe.rain(0.5)
        self.traverse(node, nv)
def main(argv):
# use an ArgumentParser object to manage the program arguments.
arguments = osg.ArgumentParser(argv)
# set up the usage document, in case we need to print out how to use this program.
arguments.getApplicationUsage().setApplicationName(arguments.getApplicationName())
arguments.getApplicationUsage().setDescription(arguments.getApplicationName()+" example provides an interactive viewer for visualising point clouds..")
arguments.getApplicationUsage().setCommandLineUsage(arguments.getApplicationName()+" [options] filename ...")
arguments.getApplicationUsage().addCommandLineOption("-h or --help","Display this information")
arguments.getApplicationUsage().addCommandLineOption("--snow <density>","Set the snow with a density between 0 and 1.0")
arguments.getApplicationUsage().addCommandLineOption("--rain <density>","")
arguments.getApplicationUsage().addCommandLineOption("--particleSize <size>","")
arguments.getApplicationUsage().addCommandLineOption("--particleColour <red> <green> <blue> <alpha>","")
arguments.getApplicationUsage().addCommandLineOption("--wind <x> <y> <z>","Set the wind speed in model coordinates")
arguments.getApplicationUsage().addCommandLineOption("--particleSpeed <float>","Set the particle speed")
    arguments.getApplicationUsage().addCommandLineOption("--nearTransition <distance>","Set the near transition distance")
    arguments.getApplicationUsage().addCommandLineOption("--farTransition <distance>","Set the far transition distance")
arguments.getApplicationUsage().addCommandLineOption("--particleDensity <density>","Set the particle density")
arguments.getApplicationUsage().addCommandLineOption("--cellSize <x> <y> <z>","Set the cell size in model coordinates")
arguments.getApplicationUsage().addCommandLineOption("--fogDensity <density>","Set the fog density")
arguments.getApplicationUsage().addCommandLineOption("--fogColour <red> <green> <blue> <alpha>","Set fog colour.")
    arguments.getApplicationUsage().addCommandLineOption("--useFarLineSegments","Switch on the use of line segments")
# construct the viewer.
viewer = osgViewer.Viewer()
# set up the camera manipulators.
keyswitchManipulator = osgGA.KeySwitchMatrixManipulator()
keyswitchManipulator.addMatrixManipulator( ord("1"), "Trackball", osgGA.TrackballManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("2"), "Flight", osgGA.FlightManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("3"), "Drive", osgGA.DriveManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("4"), "Terrain", osgGA.TerrainManipulator() )
pathfile = str()
keyForAnimationPath = ord("5")
while arguments.read("-p",pathfile) :
apm = osgGA.AnimationPathManipulator(pathfile)
if apm or not apm.valid() :
num = keyswitchManipulator.getNumMatrixManipulators()
keyswitchManipulator.addMatrixManipulator( keyForAnimationPath, "Path", apm )
keyswitchManipulator.selectMatrixManipulator(num)
            keyForAnimationPath += 1
viewer.setCameraManipulator( keyswitchManipulator )
precipitationEffect = osgParticle.PrecipitationEffect()
intensity = float()
while arguments.read("--snow", intensity) : precipitationEffect.snow(intensity)
while arguments.read("--rain", intensity) : precipitationEffect.rain(intensity)
value = float()
while arguments.read("--particleSize", value) : precipitationEffect.setParticleSize(value)
color = osg.Vec4()
while arguments.read("--particleColor", color.r(), color.g(), color.b(), color.a()) : precipitationEffect.setParticleColor(color)
while arguments.read("--particleColour", color.r(), color.g(), color.b(), color.a()) : precipitationEffect.setParticleColor(color)
wind = osg.Vec3()
while arguments.read("--wind", wind.x(), wind.y(), wind.z()) : precipitationEffect.setWind(wind)
while arguments.read("--particleSpeed", value) : precipitationEffect.setParticleSpeed(value)
while arguments.read("--nearTransition", value ) : precipitationEffect.setNearTransition(value)
while arguments.read("--farTransition", value ) : precipitationEffect.setFarTransition(value)
while arguments.read("--particleDensity", value ) : precipitationEffect.setMaximumParticleDensity(value)
cellSize = osg.Vec3()
while arguments.read("--cellSize", cellSize.x(), cellSize.y(), cellSize.z()) : precipitationEffect.setCellSize(cellSize)
clipDistance = 0.0
    while arguments.read("--clip",clipDistance) :
        pass
    bb = osg.BoundingBox()
    while arguments.read("--boundingBox", bb.xMin(),
                         bb.yMin(),
                         bb.zMin(),
                         bb.xMax(),
                         bb.yMax(),
                         bb.zMax()) :
        pass
while arguments.read("--fogDensity", value ) : precipitationEffect.getFog().setDensity(value)
while arguments.read("--fogColor", color.r(), color.g(), color.b(), color.a() ) : precipitationEffect.getFog().setColor(color)
while arguments.read("--fogColour", color.r(), color.g(), color.b(), color.a() ) : precipitationEffect.getFog().setColor(color)
while arguments.read("--useFarLineSegments") : precipitationEffect.setUseFarLineSegments(True)
viewer.getCamera().setClearColor( precipitationEffect.getFog().getColor() )
# if user request help write it out to cout.
if arguments.read("-h") or arguments.read("--help") :
        arguments.getApplicationUsage().write(sys.stdout)
return 1
# read the scene from the list of file specified commandline args.
loadedModel = osgDB.readNodeFiles(arguments)
if not loadedModel :
print arguments.getApplicationName(), ": No data loaded"
return 1
# precipitationEffect.setUpdateCallback(MyGustCallback)()
group = osg.Group()
    if clipDistance != 0.0 :
clipNode = osg.ClipNode()
clipNode.addClipPlane( osg.ClipPlane( 0 ) )
clipNode.getClipPlane(0).setClipPlane( 0.0, 0.0, -1.0, -clipDistance )
clipNode.setReferenceFrame(osg.ClipNode.ABSOLUTE_RF)
clipNode.addChild(precipitationEffect)
group.addChild(clipNode)
else:
group.addChild(precipitationEffect)
group.addChild(loadedModel)
loadedModel.getOrCreateStateSet().setAttributeAndModes(precipitationEffect.getFog())
# create the light
lightSource = osg.LightSource()
group.addChild(lightSource)
light = lightSource.getLight()
light.setLightNum(0)
light.setPosition(osg.Vec4(0.0,0.0,1.0,0.0)) # directional light from above
light.setAmbient(osg.Vec4(0.8,0.8,0.8,1.0))
light.setDiffuse(osg.Vec4(0.2,0.2,0.2,1.0))
light.setSpecular(osg.Vec4(0.2,0.2,0.2,1.0))
# set the scene to render
viewer.setSceneData(group)
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
|
|
# -*- coding: utf-8 -*-
"""Configuration introspection and defaults."""
from __future__ import absolute_import, unicode_literals
import sys
from collections import deque, namedtuple
from datetime import timedelta
from celery.five import items, keys, python_2_unicode_compatible
from celery.utils.functional import memoize
from celery.utils.serialization import strtobool
__all__ = ('Option', 'NAMESPACES', 'flatten', 'find')
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
DEFAULT_POOL = 'prefork'
if is_jython:
DEFAULT_POOL = 'solo'
elif is_pypy:
if sys.pypy_version_info[0:3] < (1, 5, 0):
DEFAULT_POOL = 'solo'
else:
DEFAULT_POOL = 'prefork'
DEFAULT_ACCEPT_CONTENT = ['json']
DEFAULT_PROCESS_LOG_FMT = """
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
""".strip()
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
%(task_name)s[%(task_id)s]: %(message)s"""
OLD_NS = {'celery_{0}'}
OLD_NS_BEAT = {'celerybeat_{0}'}
OLD_NS_WORKER = {'celeryd_{0}'}
searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))
def Namespace(__old__=None, **options):
if __old__ is not None:
for key, opt in items(options):
if not opt.old:
opt.old = {o.format(key) for o in __old__}
return options
def old_ns(ns):
return {'{0}_{{0}}'.format(ns)}
@python_2_unicode_compatible
class Option(object):
    """Describes a Celery configuration option."""
alt = None
deprecate_by = None
remove_by = None
old = set()
typemap = {'string': str, 'int': int, 'float': float, 'any': lambda v: v,
'bool': strtobool, 'dict': dict, 'tuple': tuple}
def __init__(self, default=None, *args, **kwargs):
self.default = default
self.type = kwargs.get('type') or 'string'
for attr, value in items(kwargs):
setattr(self, attr, value)
def to_python(self, value):
return self.typemap[self.type](value)
def __repr__(self):
return '<Option: type->{0} default->{1!r}>'.format(self.type,
self.default)
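# Hedged illustration (not part of the original module): an Option's ``type``
# selects the coercion applied by ``to_python``, e.g.
#     Option(0, type='int').to_python('10')        -> 10
#     Option(False, type='bool').to_python('yes')  -> True
# since 'bool' maps to strtobool in the typemap above.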
NAMESPACES = Namespace(
accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS),
enable_utc=Option(True, type='bool'),
imports=Option((), type='tuple', old=OLD_NS),
include=Option((), type='tuple', old=OLD_NS),
timezone=Option(type='string', old=OLD_NS),
beat=Namespace(
__old__=OLD_NS_BEAT,
max_loop_interval=Option(0, type='float'),
schedule=Option({}, type='dict'),
scheduler=Option('celery.beat:PersistentScheduler'),
schedule_filename=Option('celerybeat-schedule'),
sync_every=Option(0, type='int'),
),
broker=Namespace(
url=Option(None, type='string'),
read_url=Option(None, type='string'),
write_url=Option(None, type='string'),
transport=Option(type='string'),
transport_options=Option({}, type='dict'),
connection_timeout=Option(4, type='float'),
connection_retry=Option(True, type='bool'),
connection_max_retries=Option(100, type='int'),
failover_strategy=Option(None, type='string'),
heartbeat=Option(120, type='int'),
heartbeat_checkrate=Option(3.0, type='int'),
login_method=Option(None, type='string'),
pool_limit=Option(10, type='int'),
use_ssl=Option(False, type='bool'),
host=Option(type='string'),
port=Option(type='int'),
user=Option(type='string'),
password=Option(type='string'),
vhost=Option(type='string'),
),
cache=Namespace(
__old__=old_ns('celery_cache'),
backend=Option(),
backend_options=Option({}, type='dict'),
),
cassandra=Namespace(
entry_ttl=Option(type='float'),
keyspace=Option(type='string'),
port=Option(type='string'),
read_consistency=Option(type='string'),
servers=Option(type='list'),
table=Option(type='string'),
write_consistency=Option(type='string'),
auth_provider=Option(type='string'),
auth_kwargs=Option(type='string'),
options=Option({}, type='dict'),
),
control=Namespace(
queue_ttl=Option(300.0, type='float'),
queue_expires=Option(10.0, type='float'),
),
couchbase=Namespace(
__old__=old_ns('celery_couchbase'),
backend_settings=Option(None, type='dict'),
),
mongodb=Namespace(
__old__=old_ns('celery_mongodb'),
backend_settings=Option(type='dict'),
),
event=Namespace(
__old__=old_ns('celery_event'),
queue_expires=Option(60.0, type='float'),
queue_ttl=Option(5.0, type='float'),
queue_prefix=Option('celeryev'),
serializer=Option('json'),
),
redis=Namespace(
__old__=old_ns('celery_redis'),
backend_use_ssl=Option(type='dict'),
db=Option(type='int'),
host=Option(type='string'),
max_connections=Option(type='int'),
password=Option(type='string'),
port=Option(type='int'),
socket_timeout=Option(120.0, type='float'),
socket_connect_timeout=Option(None, type='float'),
),
result=Namespace(
__old__=old_ns('celery_result'),
backend=Option(type='string'),
cache_max=Option(
-1,
type='int', old={'celery_max_cached_results'},
),
compression=Option(type='str'),
exchange=Option('celeryresults'),
exchange_type=Option('direct'),
expires=Option(
timedelta(days=1),
type='float', old={'celery_task_result_expires'},
),
persistent=Option(None, type='bool'),
serializer=Option('json'),
backend_transport_options=Option({}, type='dict'),
),
elasticsearch=Namespace(
__old__=old_ns('celery_elasticsearch'),
retry_on_timeout=Option(type='bool'),
max_retries=Option(type='int'),
timeout=Option(type='float'),
),
riak=Namespace(
__old__=old_ns('celery_riak'),
backend_settings=Option(type='dict'),
),
security=Namespace(
__old__=old_ns('celery_security'),
certificate=Option(type='string'),
cert_store=Option(type='string'),
key=Option(type='string'),
),
database=Namespace(
url=Option(old={'celery_result_dburi'}),
engine_options=Option(
type='dict', old={'celery_result_engine_options'},
),
short_lived_sessions=Option(
False, type='bool', old={'celery_result_db_short_lived_sessions'},
),
table_names=Option(type='dict', old={'celery_result_db_tablenames'}),
),
task=Namespace(
__old__=OLD_NS,
acks_late=Option(False, type='bool'),
always_eager=Option(False, type='bool'),
annotations=Option(type='any'),
compression=Option(type='string', old={'celery_message_compression'}),
create_missing_queues=Option(True, type='bool'),
default_delivery_mode=Option(2, type='string'),
default_queue=Option('celery'),
default_exchange=Option(None, type='string'), # taken from queue
default_exchange_type=Option('direct'),
default_routing_key=Option(None, type='string'), # taken from queue
default_rate_limit=Option(type='string'),
eager_propagates=Option(
False, type='bool', old={'celery_eager_propagates_exceptions'},
),
ignore_result=Option(False, type='bool'),
protocol=Option(2, type='int', old={'celery_task_protocol'}),
publish_retry=Option(
True, type='bool', old={'celery_task_publish_retry'},
),
publish_retry_policy=Option(
{'max_retries': 3,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2},
type='dict', old={'celery_task_publish_retry_policy'},
),
queues=Option(type='dict'),
queue_ha_policy=Option(None, type='string'),
queue_max_priority=Option(None, type='int'),
reject_on_worker_lost=Option(type='bool'),
remote_tracebacks=Option(False, type='bool'),
routes=Option(type='any'),
send_sent_event=Option(
False, type='bool', old={'celery_send_task_sent_event'},
),
serializer=Option('json', old={'celery_task_serializer'}),
soft_time_limit=Option(
type='float', old={'celeryd_task_soft_time_limit'},
),
time_limit=Option(
type='float', old={'celeryd_task_time_limit'},
),
store_errors_even_if_ignored=Option(False, type='bool'),
track_started=Option(False, type='bool'),
),
worker=Namespace(
__old__=OLD_NS_WORKER,
agent=Option(None, type='string'),
autoscaler=Option('celery.worker.autoscale:Autoscaler'),
concurrency=Option(0, type='int'),
consumer=Option('celery.worker.consumer:Consumer', type='string'),
direct=Option(False, type='bool', old={'celery_worker_direct'}),
disable_rate_limits=Option(
False, type='bool', old={'celery_disable_rate_limits'},
),
enable_remote_control=Option(
True, type='bool', old={'celery_enable_remote_control'},
),
hijack_root_logger=Option(True, type='bool'),
log_color=Option(type='bool'),
log_format=Option(DEFAULT_PROCESS_LOG_FMT),
lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}),
max_memory_per_child=Option(type='int'),
max_tasks_per_child=Option(type='int'),
pool=Option(DEFAULT_POOL),
pool_putlocks=Option(True, type='bool'),
pool_restarts=Option(False, type='bool'),
prefetch_multiplier=Option(4, type='int'),
redirect_stdouts=Option(
True, type='bool', old={'celery_redirect_stdouts'},
),
redirect_stdouts_level=Option(
'WARNING', old={'celery_redirect_stdouts_level'},
),
send_task_events=Option(
False, type='bool', old={'celery_send_events'},
),
state_db=Option(),
task_log_format=Option(DEFAULT_TASK_LOG_FMT),
timer=Option(type='string'),
timer_precision=Option(1.0, type='float'),
),
)
def _flatten_keys(ns, key, opt):
return [(ns + key, opt)]
def _to_compat(ns, key, opt):
if opt.old:
return [
(oldkey.format(key).upper(), ns + key, opt)
for oldkey in opt.old
]
return [((ns + key).upper(), ns + key, opt)]
def flatten(d, root='', keyfilter=_flatten_keys):
"""Flatten settings."""
stack = deque([(root, d)])
while stack:
ns, options = stack.popleft()
for key, opt in items(options):
if isinstance(opt, dict):
stack.append((ns + key + '_', opt))
else:
for ret in keyfilter(ns, key, opt):
yield ret
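# Hedged illustration (not part of the original module): flatten() walks the
# nested Namespace dicts and joins namespace and key with an underscore, so
# the ``broker`` namespace's ``url`` option surfaces as ``broker_url`` and the
# ``task`` namespace's ``acks_late`` option as ``task_acks_late`` -- which is
# how DEFAULTS below ends up keyed by the new-style lowercase setting names.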
DEFAULTS = {
key: opt.default for key, opt in flatten(NAMESPACES)
}
__compat = list(flatten(NAMESPACES, keyfilter=_to_compat))
_OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat}
_TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat}
_TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat}
__compat = None
SETTING_KEYS = set(keys(DEFAULTS))
_OLD_SETTING_KEYS = set(keys(_TO_NEW_KEY))
def find_deprecated_settings(source): # pragma: no cover
from celery.utils import deprecated
for name, opt in flatten(NAMESPACES):
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
deprecated.warn(description='The {0!r} setting'.format(name),
deprecation=opt.deprecate_by,
removal=opt.remove_by,
alternative='Use the {0.alt} instead'.format(opt))
return source
@memoize(maxsize=None)
def find(name, namespace='celery'):
"""Find setting by name."""
# - Try specified name-space first.
namespace = namespace.lower()
try:
return searchresult(
namespace, name.lower(), NAMESPACES[namespace][name.lower()],
)
except KeyError:
# - Try all the other namespaces.
for ns, opts in items(NAMESPACES):
if ns.lower() == name.lower():
return searchresult(None, ns, opts)
elif isinstance(opts, dict):
try:
return searchresult(ns, name.lower(), opts[name.lower()])
except KeyError:
pass
# - See if name is a qualname last.
return searchresult(None, name.lower(), DEFAULTS[name.lower()])
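# Hedged illustration (not part of the original module): because 'celery' is
# not itself a key of NAMESPACES, a lookup such as find('default_queue') falls
# through to the namespace scan and returns
#     searchresult(namespace='task', key='default_queue', type=<Option ...>)
# i.e. the option that backs the ``task_default_queue`` setting.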
|
|
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch # for torch.cat and torch.zeros
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from nhwc.conv import Conv2d_NHWC
from nhwc.batch_norm import BatchNorm2d_NHWC
from nhwc.max_pool import MaxPool2d_NHWC
# Group batch norm
from apex.parallel import SyncBatchNorm as gbn
# Persistent group BN for NHWC case
from apex.contrib.groupbn.batch_norm import BatchNorm2d_NHWC as gbn_persistent
import apex.parallel
__all__ = ['resnet34']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Layers_NCHW:
Conv2d = nn.Conv2d
MaxPool = nn.MaxPool2d
BnAddRelu = None # will be assigned at construction
def __init__(self, bn_group, **kwargs):
super(Layers_NCHW, self).__init__()
self.nhwc = False
self.bn_group = bn_group
if (bn_group > 1):
bn_base = gbn
else:
bn_base = nn.BatchNorm2d
class BnAddRelu_(bn_base):
def __init__(self, planes, fuse_relu=False, bn_group=1):
if (bn_group > 1):
super(BnAddRelu_, self).__init__(
planes,
process_group=apex.parallel.create_syncbn_process_group(bn_group))
else:
super(BnAddRelu_, self).__init__(planes)
self.fuse_relu_flag = fuse_relu
def forward(self, x, z=None):
out = super().forward(x)
if z is not None:
out = out.add_(z)
if self.fuse_relu_flag:
out = out.relu_()
return out
# this is still Layers_NCHW::__init__
self.BnAddRelu = BnAddRelu_
def build_bn(self, planes, fuse_relu=False):
return self.BnAddRelu(planes, fuse_relu, self.bn_group)
class Layers_NHWC:
Conv2d = Conv2d_NHWC
MaxPool = MaxPool2d_NHWC
class BnAddRelu(gbn_persistent):
def __init__(self, planes, fuse_relu=False, bn_group=1):
super(Layers_NHWC.BnAddRelu, self).__init__(planes,
fuse_relu,
bn_group=bn_group)
def __init__(self, bn_group, **kwargs):
super(Layers_NHWC, self).__init__()
self.nhwc = True
self.bn_group = bn_group
def build_bn(self, planes, fuse_relu):
return self.BnAddRelu(planes, fuse_relu, self.bn_group)
def conv1x1(layer_types, in_planes, out_planes, stride=1):
"""1x1 convolution"""
return layer_types.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
def conv3x3(layer_types, in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return layer_types.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, layerImpls, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(layerImpls, inplanes, planes, stride=stride)
self.bn1 = layerImpls.build_bn(planes, fuse_relu=True)
self.conv2 = conv3x3(layerImpls, planes, planes)
self.bn2 = layerImpls.build_bn(planes, fuse_relu=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
if self.downsample is not None:
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.conv2(out)
out = self.bn2(out, residual)
return out
class ResNet(nn.Module):
def __init__(self, layerImpls, block, layers, num_classes=1000,
pad_input=False, ssd_mods=False, use_nhwc=False,
bn_group=1):
self.inplanes = 64
super(ResNet, self).__init__()
if pad_input:
input_channels = 4
else:
input_channels = 3
self.conv1 = layerImpls.Conv2d(input_channels, 64, kernel_size=7, stride=2,
padding=3, bias=False)
self.bn1 = layerImpls.build_bn(64, fuse_relu=True)
self.maxpool = layerImpls.MaxPool(kernel_size=3, stride=2, padding=1)
# Add conv{2,3,4}
self.layer1 = self._make_layer(layerImpls, block, 64, layers[0])
self.layer2 = self._make_layer(layerImpls, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(layerImpls, block, 256, layers[2], stride=1)
# FIXME! This (a) fails for nhwc, and (b) is irrelevant if the user is
# also loading pretrained data (which we don't know about here, but
# know about in the caller (the "resnet()" function below).
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, layerImpls, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
layerImpls.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
layerImpls.build_bn(planes * block.expansion, fuse_relu=False),
)
layers = []
layers.append(block(layerImpls, self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(layerImpls, self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
        # NOTE: layer4 and the classifier head are intentionally not built for
        # this trimmed SSD backbone (resnet34() below also strips them from any
        # pretrained weights), so the forward pass stops after layer3.
return x
def _transpose_state(state, pad_input=False):
for k in state.keys():
if len(state[k].shape) == 4:
if pad_input and "conv1.weight" in k and not 'layer' in k:
s = state[k].shape
state[k] = torch.cat([state[k], torch.zeros([s[0], 1, s[2], s[3]])], dim=1)
state[k] = state[k].permute(0, 2, 3, 1).contiguous()
return state
def resnet34(pretrained=False, nhwc=False, ssd_mods=False, **kwargs):
    """Constructs a trimmed ResNet-34 backbone (conv1 through layer3).
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if nhwc:
layerImpls = Layers_NHWC(**kwargs)
else:
layerImpls = Layers_NCHW(**kwargs)
block = BasicBlock
layer_list = [3, 4, 6, 3]
model = ResNet(layerImpls, block, layer_list, ssd_mods=ssd_mods, use_nhwc=nhwc, **kwargs)
if pretrained:
orig_state_dict = model_zoo.load_url(model_urls['resnet34'])
# Modify the state dict to remove conv5 / layer4
        state_dict = {k: orig_state_dict[k] for k in orig_state_dict
                      if (not k.startswith('layer4') and not k.startswith('fc'))}
pad_input = kwargs.get('pad_input', False)
if nhwc:
state_dict = _transpose_state(state_dict, pad_input)
model.load_state_dict(state_dict)
return nn.Sequential(model.conv1, model.bn1, model.maxpool, model.layer1, model.layer2, model.layer3)
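# --- Hedged usage sketch (not part of the original module) ---
# Assuming apex and the local nhwc helpers imported above are available, the
# trimmed ResNet-34 backbone (conv1 .. layer3) can be built like this; the
# argument values here are illustrative, not taken from the training scripts
# that normally drive this file.
if __name__ == "__main__":
    backbone = resnet34(pretrained=False, nhwc=False, ssd_mods=True,
                        bn_group=1, pad_input=False)
    print(backbone)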
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.io.graph_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tempfile
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
_VALID_FILE_PATTERN = "VALID"
_FILE_NAMES = [b"abc", b"def", b"ghi", b"jkl"]
_INVALID_FILE_PATTERN = "INVALID"
class GraphIOTest(tf.test.TestCase):
def _mock_glob(self, pattern):
if _VALID_FILE_PATTERN == pattern:
return _FILE_NAMES
self.assertEqual(_INVALID_FILE_PATTERN, pattern)
return []
def setUp(self):
super(GraphIOTest, self).setUp()
random.seed(42)
self._orig_glob = gfile.Glob
gfile.Glob = self._mock_glob
def tearDown(self):
gfile.Glob = self._orig_glob
super(GraphIOTest, self).tearDown()
def test_dequeue_batch_value_errors(self):
default_batch_size = 17
queue_capacity = 1234
num_threads = 3
name = "my_batch"
self.assertRaisesRegexp(
ValueError, "No files match",
tf.contrib.learn.io.read_batch_examples,
_INVALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid batch_size",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, None, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid batch_size",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, -1, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid queue_capacity",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=None,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid num_threads",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=None, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid num_threads",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=-1, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid batch_size",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, queue_capacity + 1, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=1, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid num_epochs",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=-1, queue_capacity=queue_capacity, num_threads=1,
name=name)
def test_batch_record_features(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
features = {"feature": tf.FixedLenFeature(shape=[0], dtype=tf.float32)}
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
features = tf.contrib.learn.io.read_batch_record_features(
_VALID_FILE_PATTERN, batch_size, features, randomize_input=False,
queue_capacity=queue_capacity, reader_num_threads=2,
parser_num_threads=2, name=name)
self.assertEquals("%s/parse_example_batch_join:0" % name,
features["feature"].name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/fifo_queue" % name
parse_example_queue_name = "%s/parse_example_batch_join" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueue",
"%s/read/TFRecordReader" % name: "TFRecordReader",
example_queue_name: "FIFOQueue",
parse_example_queue_name: "QueueDequeueMany",
name: "QueueDequeueMany"
}, g)
self.assertAllEqual(_FILE_NAMES, sess.run(["%s:0" % file_names_name])[0])
self.assertEqual(
queue_capacity, op_nodes[example_queue_name].attr["capacity"].i)
def test_one_epoch(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
inputs = tf.contrib.learn.io.read_batch_examples(
_VALID_FILE_PATTERN, batch_size,
reader=tf.TFRecordReader, randomize_input=True,
num_epochs=1,
queue_capacity=queue_capacity, name=name)
self.assertEquals("%s:0" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_name_queue_limit_name = (
"%s/limit_epochs/epochs" % file_name_queue_name)
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/random_shuffle_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueue",
"%s/read/TFRecordReader" % name: "TFRecordReader",
example_queue_name: "RandomShuffleQueue",
name: "QueueDequeueMany",
file_name_queue_limit_name: "Variable"
}, g)
self.assertEqual(
set(_FILE_NAMES), set(sess.run(["%s:0" % file_names_name])[0]))
self.assertEqual(
queue_capacity, op_nodes[example_queue_name].attr["capacity"].i)
def test_batch_randomized(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
inputs = tf.contrib.learn.io.read_batch_examples(
_VALID_FILE_PATTERN, batch_size,
reader=tf.TFRecordReader, randomize_input=True,
queue_capacity=queue_capacity, name=name)
self.assertEquals("%s:0" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/random_shuffle_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueue",
"%s/read/TFRecordReader" % name: "TFRecordReader",
example_queue_name: "RandomShuffleQueue",
name: "QueueDequeueMany"
}, g)
self.assertEqual(
set(_FILE_NAMES), set(sess.run(["%s:0" % file_names_name])[0]))
self.assertEqual(
queue_capacity, op_nodes[example_queue_name].attr["capacity"].i)
def test_read_csv(self):
gfile.Glob = self._orig_glob
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "file.csv")
gfile.Open(filename, "w").write("ABC\nDEF\nGHK\n")
batch_size = 1
queue_capacity = 5
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
inputs = tf.contrib.learn.io.read_batch_examples(
filename, batch_size,
reader=tf.TextLineReader, randomize_input=False,
num_epochs=1, queue_capacity=queue_capacity, name=name)
session.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(session, coord=coord)
self.assertAllEqual(session.run(inputs), [b"ABC"])
self.assertAllEqual(session.run(inputs), [b"DEF"])
self.assertAllEqual(session.run(inputs), [b"GHK"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
if __name__ == "__main__":
tf.test.main()
|
|
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import pprint
import random
import uuid
from xml.sax import saxutils
import zlib
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from oslo.utils import units
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.xenapi.client import session as xenapi_session
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
text = msg or ""
content = pprint.pformat(_db_content)
LOG.debug("%(text)s: _db_content => %(content)s",
{'text': text, 'content': content})
def reset():
for c in _CLASSES:
_db_content[c] = {}
host = create_host('fake')
create_vm('fake dom 0',
'Running',
is_a_template=False,
is_control_domain=True,
resident_on=host)
def reset_table(table):
if table not in _CLASSES:
return
_db_content[table] = {}
def _create_pool(name_label):
return _create_object('pool',
{'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
host_ref = _create_object('host',
{'name_label': name_label,
'hostname': hostname,
'address': address})
host_default_sr_ref = _create_local_srs(host_ref)
_create_local_pif(host_ref)
# Create a pool if we don't have one already
if len(_db_content['pool']) == 0:
pool_ref = _create_pool('')
_db_content['pool'][pool_ref]['master'] = host_ref
_db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
        _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
    return host_ref
def create_network(name_label, bridge):
return _create_object('network',
{'name_label': name_label,
'bridge': bridge})
def create_vm(name_label, status, **kwargs):
if status == 'Running':
domid = random.randrange(1, 1 << 16)
resident_on = _db_content['host'].keys()[0]
else:
domid = -1
resident_on = ''
vm_rec = kwargs.copy()
vm_rec.update({'name_label': name_label,
'domid': domid,
'power_state': status,
'blocked_operations': {},
'resident_on': resident_on})
vm_ref = _create_object('VM', vm_rec)
after_VM_create(vm_ref, vm_rec)
return vm_ref
def destroy_vm(vm_ref):
vm_rec = _db_content['VM'][vm_ref]
vbd_refs = vm_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
vbd_rec = _db_content['VBD'][vbd_ref]
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].remove(vbd_ref)
vdi_ref = vbd_rec['VDI']
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].remove(vbd_ref)
del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
vdi_rec = _db_content['VDI'][vdi_ref]
vbd_refs = vdi_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
vdi_rec = {
'SR': sr_ref,
'read_only': False,
'type': '',
'name_label': name_label,
'name_description': '',
'sharable': False,
'other_config': {},
'location': '',
'xenstore_data': {},
'sm_config': {'vhd-parent': None},
'physical_utilisation': '123',
'managed': True,
}
vdi_rec.update(kwargs)
vdi_ref = _create_object('VDI', vdi_rec)
after_VDI_create(vdi_ref, vdi_rec)
return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None):
if other_config is None:
other_config = {}
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': str(userdevice),
'currently_attached': False,
'other_config': other_config}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
"""Create read-only fields and backref from VM and VDI to VBD when VBD
is created.
"""
vbd_rec['currently_attached'] = False
vbd_rec['device'] = ''
vbd_rec.setdefault('other_config', {})
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].append(vbd_ref)
vm_name_label = _db_content['VM'][vm_ref]['name_label']
vbd_rec['vm_name_label'] = vm_name_label
vdi_ref = vbd_rec['VDI']
if vdi_ref and vdi_ref != "OpaqueRef:NULL":
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].append(vbd_ref)
def after_VIF_create(vif_ref, vif_rec):
"""Create backref from VM to VIF when VIF is created.
"""
vm_ref = vif_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VIFs'].append(vif_ref)
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('domid', -1)
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('is_a_template', False)
vm_rec.setdefault('memory_static_max', str(8 * units.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('VIFs', [])
vm_rec.setdefault('resident_on', '')
def create_pbd(host_ref, sr_ref, attached):
config = {'path': '/var/run/sr-mount/%s' % sr_ref}
return _create_object('PBD',
{'device_config': config,
'host': host_ref,
'SR': sr_ref,
'currently_attached': attached})
def create_task(name_label):
return _create_object('task',
{'name_label': name_label,
'status': 'pending'})
def _create_local_srs(host_ref):
"""Create an SR that looks like the one created on the local disk by
default by the XenServer installer. Also, fake the installation of
an ISO SR.
"""
create_sr(name_label='Local storage ISO',
type='iso',
other_config={'i18n-original-value-name_label':
'Local storage ISO',
'i18n-key': 'local-storage-iso'},
physical_size=80000,
physical_utilisation=40000,
virtual_allocation=80000,
host_ref=host_ref)
return create_sr(name_label='Local storage',
type='ext',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
physical_size=40000,
physical_utilisation=20000,
virtual_allocation=10000,
host_ref=host_ref)
def create_sr(**kwargs):
sr_ref = _create_object(
'SR',
{'name_label': kwargs.get('name_label'),
'type': kwargs.get('type'),
'content_type': kwargs.get('type', 'user'),
'shared': kwargs.get('shared', False),
'physical_size': kwargs.get('physical_size', str(1 << 30)),
'physical_utilisation': str(
kwargs.get('physical_utilisation', 0)),
'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
'other_config': kwargs.get('other_config', {}),
'VDIs': kwargs.get('VDIs', [])})
pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': -1,
'device': 'fake0',
'host_uuid': host_ref,
'network': '',
'IP': '10.1.1.1',
'IPv6': '',
'uuid': '',
'management': 'true'})
_db_content['PIF'][pif_ref]['uuid'] = pif_ref
return pif_ref
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
_db_content[table][ref] = obj
return ref
def _create_sr(table, obj):
sr_type = obj[6]
# Forces fake to support iscsi only
if sr_type != 'iscsi' and sr_type != 'nfs':
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
host_ref = _db_content['host'].keys()[0]
sr_ref = _create_object(table, obj[2])
if sr_type == 'iscsi':
vdi_ref = create_vdi('', sr_ref)
pbd_ref = create_pbd(host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
_db_content['PBD'][pbd_ref]['SR'] = sr_ref
return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
pif_rec = get_record('PIF', pif_ref)
vlan_pif_ref = _create_object('PIF',
{'name-label': 'Fake VLAN PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': vlan_num,
'device': pif_rec['device'],
'host_uuid': pif_rec['host_uuid']})
return _create_object('VLAN',
{'tagged-pif': pif_ref,
'untagged-pif': vlan_pif_ref,
'tag': vlan_num})
def get_all(table):
return _db_content[table].keys()
def get_all_records(table):
return _db_content[table]
def _query_matches(record, query):
# Simple support for the XenServer query language:
# 'field "host"="<uuid>" and field "SR"="<sr uuid>"'
# Tested through existing tests (e.g. calls to find_network_with_bridge)
and_clauses = query.split(" and ")
if len(and_clauses) > 1:
matches = True
for clause in and_clauses:
matches = matches and _query_matches(record, clause)
return matches
or_clauses = query.split(" or ")
if len(or_clauses) > 1:
matches = False
for clause in or_clauses:
matches = matches or _query_matches(record, clause)
return matches
if query[:4] == 'not ':
return not _query_matches(record, query[4:])
# Now it must be a single field - bad queries never match
if query[:5] != 'field':
return False
(field, value) = query[6:].split('=', 1)
# Some fields (e.g. name_label, memory_overhead) have double
# underscores in the DB, but only single underscores when querying
field = field.replace("__", "_").strip(" \"'")
value = value.strip(" \"'")
# Strings should be directly compared
if isinstance(record[field], str):
return record[field] == value
# But for all other value-checks, convert to a string first
# (Notably used for booleans - which can be lower or camel
# case and are interpreted/sanitised by XAPI)
return str(record[field]).lower() == value.lower()
def get_all_records_where(table_name, query):
matching_records = {}
table = _db_content[table_name]
for record in table:
if _query_matches(table[record], query):
matching_records[record] = table[record]
return matching_records
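# Hedged illustration (not part of the original module): with the mini query
# language handled by _query_matches above, a call like
#     get_all_records_where('PBD', 'field "host"="%s" and field "SR"="%s"'
#                           % (host_ref, sr_ref))
# returns only the PBD records whose 'host' and 'SR' fields both match.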
def get_record(table, ref):
if ref in _db_content[table]:
return _db_content[table].get(ref)
else:
raise Failure(['HANDLE_INVALID', table, ref])
def check_for_session_leaks():
if len(_db_content['session']) > 0:
raise exception.NovaException('Sessions have leaked: %s' %
_db_content['session'])
def as_value(s):
"""Helper function for simulating XenAPI plugin responses. It
escapes and wraps the given argument.
"""
return '<value>%s</value>' % saxutils.escape(s)
def as_json(*args, **kwargs):
"""Helper function for simulating XenAPI plugin responses for those
that are returning JSON. If this function is given plain arguments,
then these are rendered as a JSON list. If it's given keyword
arguments then these are rendered as a JSON dict.
"""
arg = args or kwargs
return jsonutils.dumps(arg)
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
return {str(i): self.details[i] for i in range(len(self.details))}
class SessionBase(object):
"""Base class for Fake Sessions."""
def __init__(self, uri):
self._session = None
xenapi_session.apply_session_helpers(self)
def pool_get_default_SR(self, _1, pool_ref):
return _db_content['pool'].values()[0]['default-SR']
def VBD_insert(self, _1, vbd_ref, vdi_ref):
vbd_rec = get_record('VBD', vbd_ref)
get_record('VDI', vdi_ref)
vbd_rec['empty'] = False
vbd_rec['VDI'] = vdi_ref
def VBD_plug(self, _1, ref):
rec = get_record('VBD', ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
rec['device'] = rec['userdevice']
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', ref])
rec['currently_attached'] = False
rec['device'] = ''
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
vbd_ref, key])
db_ref['other_config'][key] = value
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
def PBD_create(self, _1, pbd_rec):
pbd_ref = _create_object('PBD', pbd_rec)
_db_content['PBD'][pbd_ref]['currently_attached'] = False
return pbd_ref
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
shared, sm_config):
ref = None
rec = None
for ref, rec in _db_content['SR'].iteritems():
if rec.get('uuid') == sr_uuid:
# make forgotten = 0 and return ref
_db_content['SR'][ref]['forgotten'] = 0
return ref
# SR not found in db, so we create one
params = {'sr_uuid': sr_uuid,
'label': label,
'desc': desc,
'type': type,
'content_type': content_type,
'shared': shared,
'sm_config': sm_config}
sr_ref = _create_object('SR', params)
_db_content['SR'][sr_ref]['uuid'] = sr_uuid
_db_content['SR'][sr_ref]['forgotten'] = 0
vdi_per_lun = False
if type == 'iscsi':
# Just to be clear
vdi_per_lun = True
if vdi_per_lun:
# we need to create a vdi because this introduce
# is likely meant for a single vdi
vdi_ref = create_vdi('', sr_ref)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
return sr_ref
def SR_forget(self, _1, sr_ref):
_db_content['SR'][sr_ref]['forgotten'] = 1
def SR_scan(self, _1, sr_ref):
return
def VM_get_xenstore_data(self, _1, vm_ref):
return _db_content['VM'][vm_ref].get('xenstore_data', {})
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
pass
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
vdi_ref, key])
db_ref['other_config'][key] = value
def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
db_ref = _db_content['VDI'][vdi_to_copy_ref]
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
other_config = db_ref['other_config'].copy()
return create_vdi(name_label, sr_ref, sharable=sharable,
read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
sr_ref = db_ref['SR']
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
# Always return 12GB available
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
def _plugin_agent_password(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_inject_file(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_agentupdate(self, method, args):
url = args["url"]
md5 = args["md5sum"]
message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
md5=md5)
return as_json(returncode='0', message=message)
def _plugin_noop(self, method, args):
return ''
def _plugin_pickle_noop(self, method, args):
return pickle.dumps(None)
def _plugin_migration_transfer_vhd(self, method, args):
kwargs = pickle.loads(args['params'])['kwargs']
vdi_ref = self.xenapi_request('VDI.get_by_uuid',
(kwargs['vdi_uuid'], ))
assert vdi_ref
return pickle.dumps(None)
_plugin_glance_upload_vhd = _plugin_pickle_noop
_plugin_kernel_copy_vdi = _plugin_noop
_plugin_kernel_create_kernel_ramdisk = _plugin_noop
_plugin_kernel_remove_kernel_ramdisk = _plugin_noop
_plugin_migration_move_vhds_into_sr = _plugin_noop
def _plugin_xenhost_host_data(self, method, args):
return jsonutils.dumps({'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40},
'host_hostname': 'fake-xenhost',
'host_cpu_info': {'cpu_count': 50},
})
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
_plugin_xenhost_host_reboot = _plugin_poweraction
_plugin_xenhost_host_startup = _plugin_poweraction
_plugin_xenhost_host_shutdown = _plugin_poweraction
def _plugin_xenhost_set_host_enabled(self, method, args):
enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
return jsonutils.dumps({"status": enabled})
def _plugin_xenhost_host_uptime(self, method, args):
return jsonutils.dumps({"uptime": "fake uptime"})
def _plugin_xenhost_get_pci_device_details(self, method, args):
"""Simulate the ouput of three pci devices.
Both of those devices are available for pci passtrough but
only one will match with the pci whitelist used in the
method test_pci_passthrough_devices_*().
Return a single list.
"""
# Driver is not pciback
dev_bad1 = ["Slot:\t0000:86:10.0", "Class:\t0604", "Vendor:\t10b5",
"Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"]
# Driver is pciback but vendor and device are bad
dev_bad2 = ["Slot:\t0000:88:00.0", "Class:\t0300", "Vendor:\t0bad",
"Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
# Driver is pciback and vendor, device are used for matching
dev_good = ["Slot:\t0000:87:00.0", "Class:\t0300", "Vendor:\t10de",
"Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good)
return pickle.dumps(lspci_output)
def _plugin_xenhost_get_pci_type(self, method, args):
return pickle.dumps("type-PCI")
def _plugin_console_get_console_log(self, method, args):
dom_id = args["dom_id"]
if dom_id == 0:
raise Failure('Guest does not have a console')
return base64.b64encode(zlib.compress("dom_id: %s" % dom_id))
def _plugin_nova_plugin_version_get_version(self, method, args):
return pickle.dumps("1.2")
def _plugin_xenhost_query_gc(self, method, args):
return pickle.dumps("False")
def host_call_plugin(self, _1, _2, plugin, method, args):
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %
(plugin, method))
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * units.Gi
def VDI_resize_online(self, *args):
return 'derp'
VDI_resize = VDI_resize_online
def _VM_reboot(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
if db_ref['power_state'] != 'Running':
raise Failure(['VM_BAD_POWER_STATE',
'fake-opaque-ref', db_ref['power_state'].lower(), 'halted'])
db_ref['power_state'] = 'Running'
db_ref['domid'] = random.randrange(1, 1 << 16)
def VM_clean_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Halted'
db_ref['domid'] = -1
VM_clean_shutdown = VM_hard_shutdown
def VM_suspend(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Suspended'
def VM_pause(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Paused'
def pool_eject(self, session, host_ref):
pass
def pool_join(self, session, hostname, username, password):
pass
def pool_set_name_label(self, session, pool_ref, name):
pass
def host_migrate_receive(self, session, destref, nwref, options):
return "fake_migrate_data"
def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
vdi_map, vif_map, options):
pass
def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
vif_map, options):
pass
def VM_remove_from_blocked_operations(self, session, vm_ref, key):
# operation is idempotent, XenServer doesn't care if the key exists
_db_content['VM'][vm_ref]['blocked_operations'].pop(key, None)
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
def _login(self, method, params):
self._session = str(uuid.uuid4())
_session_info = {'uuid': str(uuid.uuid4()),
'this_host': _db_content['host'].keys()[0]}
_db_content['session'][self._session] = _session_info
def _logout(self):
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
"Logging out a session that is invalid or already logged "
"out: %s" % s)
del _db_content['session'][s]
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
elif name.startswith('Async'):
return lambda *params: self._async(name, params)
elif '.' in name:
impl = getattr(self, name.replace('.', '_'))
if impl is not None:
def callit(*params):
LOG.debug('Calling %(name)s %(impl)s',
{'name': name, 'impl': impl})
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
LOG.debug('Calling getter %s', name)
return lambda *params: self._getter(name, params)
elif self._is_gettersetter(name, False):
LOG.debug('Calling setter %s', name)
return lambda *params: self._setter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
elif name == 'XenAPI':
return FakeXenAPI()
else:
return None
def _is_gettersetter(self, name, getter):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
return self._is_method(name, 'create')
def _is_destroy(self, name):
return self._is_method(name, 'destroy')
def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1] == meth)
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
if func == 'get_all_records':
self._check_arg_count(params, 1)
return get_all_records(cls)
if func == 'get_all_records_where':
self._check_arg_count(params, 2)
return get_all_records_where(cls, params[1])
if func == 'get_record':
self._check_arg_count(params, 2)
return get_record(cls, params[1])
if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
_db_content[cls], func[len('get_by_'):], params[1],
return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
if (ref in _db_content[cls]):
if (field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
else:
raise Failure(['HANDLE_INVALID', cls, ref])
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
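    # Rough illustration of the routing above (the refs are hypothetical):
    # 'VM.get_all' needs only the session parameter, 'VM.get_record' takes a
    # ref, and any other two-parameter 'VM.get_<field>' call falls through to
    # the generic lookup of _db_content['VM'][ref][<field>].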
def _setter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if len(params) == 3:
field = func[len('set_'):]
ref = params[1]
val = params[2]
if (ref in _db_content[cls] and
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
return
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
'is missing that field' % name)
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = (is_sr_create and
_create_sr(cls, params) or
is_vlan_create and
_create_vlan(params[1], params[2], params[3]) or
_create_object(cls, params[1]))
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
if after_hook in globals():
globals()[after_hook](ref, params[1])
obj = get_record(cls, ref)
# Add RO fields
if cls == 'VM':
obj['power_state'] = 'Halted'
return ref
def _destroy(self, name, params):
self._check_session(params)
self._check_arg_count(params, 2)
table = name.split('.')[0]
ref = params[1]
if ref not in _db_content[table]:
raise Failure(['HANDLE_INVALID', table, ref])
# Call destroy function (if exists)
destroy_func = globals().get('destroy_%s' % table.lower())
if destroy_func:
destroy_func(ref)
else:
del _db_content[table][ref]
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
func = name[len('Async.'):]
try:
result = self.xenapi_request(func, params[1:])
if result:
result = as_value(result)
task['result'] = result
task['status'] = 'success'
except Failure as exc:
task['error_info'] = exc.details
task['status'] = 'failed'
task['finished'] = timeutils.utcnow()
return task_ref
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug('Raising NotImplemented')
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
actual = len(params)
if actual != expected:
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
raise Failure(['UUID_INVALID', v, result, recs, k])
return result
class FakeXenAPI(object):
def __init__(self):
self.Failure = Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
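# A hedged sketch of the dispatch path using the classes above (the ref is
# hypothetical): SessionBase.xenapi returns a _Dispatcher, so a call such as
# session.xenapi.VM.get_record(ref) accumulates the name 'VM.get_record'
# through __getattr__ chaining and ends up invoking
# xenapi_request('VM.get_record', (ref,)), which SessionBase then resolves via
# its own __getattr__ machinery.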
|
|
from __future__ import absolute_import
import datetime
import ujson
import zlib
from django.utils.translation import ugettext as _
from six import binary_type, text_type
from zerver.lib.avatar import get_avatar_url
from zerver.lib.avatar_hash import gravatar_hash
import zerver.lib.bugdown as bugdown
from zerver.lib.cache import cache_with_key, to_dict_cache_key
from zerver.lib.request import JsonableError
from zerver.lib.str_utils import force_bytes, dict_with_str_keys
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import (
get_display_recipient_by_id,
Message,
Recipient,
Stream,
UserProfile,
UserMessage,
Reaction
)
from typing import Any, Dict, List, Optional, Set, Tuple
RealmAlertWords = Dict[int, List[text_type]]
def extract_message_dict(message_bytes):
# type: (binary_type) -> Dict[str, Any]
return dict_with_str_keys(ujson.loads(zlib.decompress(message_bytes).decode("utf-8")))
def stringify_message_dict(message_dict):
# type: (Dict[str, Any]) -> binary_type
return zlib.compress(force_bytes(ujson.dumps(message_dict)))
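# Sketch of the intended round trip (not part of the original file): the cached
# form is zlib-compressed JSON bytes, so for a plain dict d,
# extract_message_dict(stringify_message_dict(d)) should yield d again, with
# keys normalised back to str by dict_with_str_keys.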
def message_to_dict(message, apply_markdown):
# type: (Message, bool) -> Dict[str, Any]
json = message_to_dict_json(message, apply_markdown)
return extract_message_dict(json)
@cache_with_key(to_dict_cache_key, timeout=3600*24)
def message_to_dict_json(message, apply_markdown):
# type: (Message, bool) -> binary_type
return MessageDict.to_dict_uncached(message, apply_markdown)
class MessageDict(object):
@staticmethod
def to_dict_uncached(message, apply_markdown):
# type: (Message, bool) -> binary_type
dct = MessageDict.to_dict_uncached_helper(message, apply_markdown)
return stringify_message_dict(dct)
@staticmethod
def to_dict_uncached_helper(message, apply_markdown):
# type: (Message, bool) -> Dict[str, Any]
return MessageDict.build_message_dict(
apply_markdown = apply_markdown,
message = message,
message_id = message.id,
last_edit_time = message.last_edit_time,
edit_history = message.edit_history,
content = message.content,
subject = message.subject,
pub_date = message.pub_date,
rendered_content = message.rendered_content,
rendered_content_version = message.rendered_content_version,
sender_id = message.sender.id,
sender_email = message.sender.email,
sender_realm_domain = message.sender.realm.domain,
sender_full_name = message.sender.full_name,
sender_short_name = message.sender.short_name,
sender_avatar_source = message.sender.avatar_source,
sender_is_mirror_dummy = message.sender.is_mirror_dummy,
sending_client_name = message.sending_client.name,
recipient_id = message.recipient.id,
recipient_type = message.recipient.type,
recipient_type_id = message.recipient.type_id,
reactions = Reaction.get_raw_db_rows([message.id])
)
@staticmethod
def build_dict_from_raw_db_row(row, apply_markdown):
# type: (Dict[str, Any], bool) -> Dict[str, Any]
'''
row is a row from a .values() call, and it needs to have
all the relevant fields populated
'''
return MessageDict.build_message_dict(
apply_markdown = apply_markdown,
message = None,
message_id = row['id'],
last_edit_time = row['last_edit_time'],
edit_history = row['edit_history'],
content = row['content'],
subject = row['subject'],
pub_date = row['pub_date'],
rendered_content = row['rendered_content'],
rendered_content_version = row['rendered_content_version'],
sender_id = row['sender_id'],
sender_email = row['sender__email'],
sender_realm_domain = row['sender__realm__domain'],
sender_full_name = row['sender__full_name'],
sender_short_name = row['sender__short_name'],
sender_avatar_source = row['sender__avatar_source'],
sender_is_mirror_dummy = row['sender__is_mirror_dummy'],
sending_client_name = row['sending_client__name'],
recipient_id = row['recipient_id'],
recipient_type = row['recipient__type'],
recipient_type_id = row['recipient__type_id'],
reactions=row['reactions']
)
@staticmethod
def build_message_dict(
apply_markdown,
message,
message_id,
last_edit_time,
edit_history,
content,
subject,
pub_date,
rendered_content,
rendered_content_version,
sender_id,
sender_email,
sender_realm_domain,
sender_full_name,
sender_short_name,
sender_avatar_source,
sender_is_mirror_dummy,
sending_client_name,
recipient_id,
recipient_type,
recipient_type_id,
reactions
):
# type: (bool, Message, int, datetime.datetime, text_type, text_type, text_type, datetime.datetime, text_type, Optional[int], int, text_type, text_type, text_type, text_type, text_type, bool, text_type, int, int, int, List[Dict[str, Any]]) -> Dict[str, Any]
avatar_url = get_avatar_url(sender_avatar_source, sender_email)
display_recipient = get_display_recipient_by_id(
recipient_id,
recipient_type,
recipient_type_id
)
if recipient_type == Recipient.STREAM:
display_type = "stream"
elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
assert not isinstance(display_recipient, text_type)
display_type = "private"
if len(display_recipient) == 1:
                # add the sender in if this isn't a message between
                # someone and themselves, preserving ordering
recip = {'email': sender_email,
'domain': sender_realm_domain,
'full_name': sender_full_name,
'short_name': sender_short_name,
'id': sender_id,
'is_mirror_dummy': sender_is_mirror_dummy}
if recip['email'] < display_recipient[0]['email']:
display_recipient = [recip, display_recipient[0]]
elif recip['email'] > display_recipient[0]['email']:
display_recipient = [display_recipient[0], recip]
obj = dict(
id = message_id,
sender_email = sender_email,
sender_full_name = sender_full_name,
sender_short_name = sender_short_name,
sender_domain = sender_realm_domain,
sender_id = sender_id,
type = display_type,
display_recipient = display_recipient,
recipient_id = recipient_id,
subject = subject,
timestamp = datetime_to_timestamp(pub_date),
gravatar_hash = gravatar_hash(sender_email), # Deprecated June 2013
avatar_url = avatar_url,
client = sending_client_name)
obj['subject_links'] = bugdown.subject_links(sender_realm_domain.lower(), subject)
        if last_edit_time is not None:
obj['last_edit_timestamp'] = datetime_to_timestamp(last_edit_time)
obj['edit_history'] = ujson.loads(edit_history)
if apply_markdown:
if Message.need_to_render_content(rendered_content, rendered_content_version, bugdown.version):
if message is None:
# We really shouldn't be rendering objects in this method, but there is
# a scenario where we upgrade the version of bugdown and fail to run
# management commands to re-render historical messages, and then we
# need to have side effects. This method is optimized to not need full
# blown ORM objects, but the bugdown renderer is unfortunately highly
# coupled to Message, and we also need to persist the new rendered content.
# If we don't have a message object passed in, we get one here. The cost
# of going to the DB here should be overshadowed by the cost of rendering
# and updating the row.
# TODO: see #1379 to eliminate bugdown dependencies
message = Message.objects.select_related().get(id=message_id)
# It's unfortunate that we need to have side effects on the message
# in some cases.
rendered_content = render_markdown(message, content, sender_realm_domain)
message.rendered_content = rendered_content
message.rendered_content_version = bugdown.version
message.save_rendered_content()
if rendered_content is not None:
obj['content'] = rendered_content
else:
obj['content'] = u'<p>[Zulip note: Sorry, we could not understand the formatting of your message]</p>'
obj['content_type'] = 'text/html'
else:
obj['content'] = content
obj['content_type'] = 'text/x-markdown'
obj['reactions'] = [ReactionDict.build_dict_from_raw_db_row(reaction)
for reaction in reactions]
return obj
class ReactionDict(object):
@staticmethod
def build_dict_from_raw_db_row(row):
# type: (Dict[str, Any]) -> Dict[str, Any]
return {'emoji_name': row.get('emoji_name'),
'user': {'email': row.get('user_profile__email'),
'id': row.get('user_profile__id'),
'full_name': row.get('user_profile__full_name')}}
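    # Minimal illustration with hypothetical row values: a .values() row like
    # {'emoji_name': 'octopus', 'user_profile__email': 'a@example.com',
    #  'user_profile__id': 1, 'user_profile__full_name': 'A'} would map to
    # {'emoji_name': 'octopus',
    #  'user': {'email': 'a@example.com', 'id': 1, 'full_name': 'A'}}.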
def re_render_content_for_management_command(message):
# type: (Message) -> None
'''
    Please avoid using this function, as it's only used in a management command
    that is somewhat deprecated.
'''
assert Message.need_to_render_content(message.rendered_content,
message.rendered_content_version,
bugdown.version)
rendered_content = render_markdown(message, message.content)
message.rendered_content = rendered_content
message.rendered_content_version = bugdown.version
message.save_rendered_content()
def access_message(user_profile, message_id):
# type: (UserProfile, int) -> Tuple[Message, UserMessage]
"""You can access a message by ID in our APIs that either:
(1) You received or have previously accessed via starring
(aka have a UserMessage row for).
(2) Was sent to a public stream in your realm.
We produce consistent, boring error messages to avoid leaking any
information from a security perspective.
"""
try:
message = Message.objects.select_related().get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Invalid message(s)"))
try:
user_message = UserMessage.objects.select_related().get(user_profile=user_profile,
message=message)
except UserMessage.DoesNotExist:
user_message = None
if user_message is None:
if message.recipient.type != Recipient.STREAM:
# You can't access private messages you didn't receive
raise JsonableError(_("Invalid message(s)"))
stream = Stream.objects.get(id=message.recipient.type_id)
if not stream.is_public():
# You can't access messages sent to invite-only streams
# that you didn't receive
raise JsonableError(_("Invalid message(s)"))
# So the message is to a public stream
if stream.realm != user_profile.realm:
# You can't access public stream messages in other realms
raise JsonableError(_("Invalid message(s)"))
# Otherwise, the message must have been sent to a public
# stream in your realm, so return the message, user_message pair
return (message, user_message)
def render_markdown(message, content, domain=None, realm_alert_words=None, message_users=None):
# type: (Message, text_type, Optional[text_type], Optional[RealmAlertWords], Set[UserProfile]) -> text_type
"""Return HTML for given markdown. Bugdown may add properties to the
message object such as `mentions_user_ids` and `mentions_wildcard`.
These are only on this Django object and are not saved in the
database.
"""
if message_users is None:
message_user_ids = set() # type: Set[int]
else:
message_user_ids = {u.id for u in message_users}
message.mentions_wildcard = False
message.is_me_message = False
message.mentions_user_ids = set()
message.alert_words = set()
message.links_for_preview = set()
if not domain:
domain = message.sender.realm.domain
if message.sending_client.name == "zephyr_mirror" and message.sender.realm.is_zephyr_mirror_realm:
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
domain = u"zephyr_mirror"
possible_words = set() # type: Set[text_type]
if realm_alert_words is not None:
for user_id, words in realm_alert_words.items():
if user_id in message_user_ids:
possible_words.update(set(words))
# DO MAIN WORK HERE -- call bugdown to convert
rendered_content = bugdown.convert(content, domain, message, possible_words)
message.user_ids_with_alert_words = set()
if realm_alert_words is not None:
for user_id, words in realm_alert_words.items():
if user_id in message_user_ids:
if set(words).intersection(message.alert_words):
message.user_ids_with_alert_words.add(user_id)
message.is_me_message = Message.is_status_message(content, rendered_content)
return rendered_content
|
|
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from Selenium2Library import utils
from Selenium2Library.locators import ElementFinder
from keywordgroup import KeywordGroup
class _ElementKeywords(KeywordGroup):
def __init__(self):
self._element_finder = ElementFinder()
# Public, element lookups
def current_frame_contains(self, text, loglevel='INFO'):
"""Verifies that current frame contains `text`.
See `Page Should Contain ` for explanation about `loglevel` argument.
"""
if not self._is_text_present(text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def current_frame_should_not_contain(self, text, loglevel='INFO'):
"""Verifies that current frame contains `text`.
See `Page Should Contain ` for explanation about `loglevel` argument.
"""
if self._is_text_present(text):
self.log_source(loglevel)
raise AssertionError("Page should not have contained text '%s' "
"but it did" % text)
self._info("Current page should not contain text '%s'." % text)
def element_should_contain(self, locator, expected, message=''):
"""Verifies element identified by `locator` contains text `expected`.
If you wish to assert an exact (not a substring) match on the text
of the element, use `Element Text Should Be`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' contains text '%s'."
% (locator, expected))
actual = self._get_text(locator)
if not expected in actual:
if not message:
message = "Element '%s' should have contained text '%s' but "\
"its text was '%s'." % (locator, expected, actual)
raise AssertionError(message)
def frame_should_contain(self, locator, text, loglevel='INFO'):
"""Verifies frame identified by `locator` contains `text`.
See `Page Should Contain ` for explanation about `loglevel` argument.
Key attributes for frames are `id` and `name.` See `introduction` for
details about locating elements.
"""
if not self._frame_contains(locator, text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def page_should_contain(self, text, loglevel='INFO'):
"""Verifies that current page contains `text`.
If this keyword fails, it automatically logs the page source
using the log level specified with the optional `loglevel` argument.
Giving `NONE` as level disables logging.
"""
if not self._page_contains(text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def page_should_contain_element(self, locator, message='', loglevel='INFO'):
"""Verifies element identified by `locator` is found on the current page.
`message` can be used to override default error message.
See `Page Should Contain` for explanation about `loglevel` argument.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, None, message, loglevel)
def page_should_not_contain(self, text, loglevel='INFO'):
"""Verifies the current page does not contain `text`.
See `Page Should Contain ` for explanation about `loglevel` argument.
"""
if self._page_contains(text):
self.log_source(loglevel)
raise AssertionError("Page should not have contained text '%s'" % text)
self._info("Current page does not contain text '%s'." % text)
def page_should_not_contain_element(self, locator, message='', loglevel='INFO'):
"""Verifies element identified by `locator` is not found on the current page.
`message` can be used to override the default error message.
See `Page Should Contain ` for explanation about `loglevel` argument.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, None, message, loglevel)
# Public, attributes
def assign_id_to_element(self, locator, id):
"""Assigns a temporary identifier to element specified by `locator`.
        This is mainly useful if the locator is a complicated or slow XPath expression.
Identifier expires when the page is reloaded.
Example:
| Assign ID to Element | xpath=//div[@id="first_div"] | my id |
| Page Should Contain Element | my id |
"""
self._info("Assigning temporary id '%s' to element '%s'" % (id, locator))
element = self._element_find(locator, True, True)
self._current_browser().execute_script("arguments[0].id = '%s';" % id, element)
def element_should_be_disabled(self, locator):
"""Verifies that element identified with `locator` is disabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if self._is_enabled(locator):
raise AssertionError("Element '%s' is enabled." % (locator))
def element_should_be_enabled(self, locator):
"""Verifies that element identified with `locator` is enabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if not self._is_enabled(locator):
raise AssertionError("Element '%s' is disabled." % (locator))
def element_should_be_visible(self, locator, message=''):
"""Verifies that the element identified by `locator` is visible.
Herein, visible means that the element is logically visible, not optically
visible in the current browser viewport. For example, an element that carries
display:none is not logically visible, so using this keyword on that element
would fail.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' is visible." % locator)
visible = self._is_visible(locator)
if not visible:
if not message:
message = "The element '%s' should be visible, but it "\
"is not." % locator
raise AssertionError(message)
def element_should_not_be_visible(self, locator, message=''):
"""Verifies that the element identified by `locator` is NOT visible.
This is the opposite of `Element Should Be Visible`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' is not visible." % locator)
visible = self._is_visible(locator)
if visible:
if not message:
message = "The element '%s' should not be visible, "\
"but it is." % locator
raise AssertionError(message)
def element_text_should_be(self, locator, expected, message=''):
"""Verifies element identified by `locator` exactly contains text `expected`.
In contrast to `Element Should Contain`, this keyword does not try
a substring match but an exact match on the element identified by `locator`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' contains exactly text '%s'."
% (locator, expected))
element = self._element_find(locator, True, True)
actual = element.text
if expected != actual:
if not message:
message = "The text of element '%s' should have been '%s' but "\
"in fact it was '%s'." % (locator, expected, actual)
raise AssertionError(message)
def get_element_attribute(self, attribute_locator):
"""Return value of element attribute.
`attribute_locator` consists of element locator followed by an @ sign
and attribute name, for example "element_id@class".
"""
locator, attribute_name = self._parse_attribute_locator(attribute_locator)
element = self._element_find(locator, True, False)
if element is None:
raise ValueError("Element '%s' not found." % (locator))
return element.get_attribute(attribute_name)
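    # A hedged usage sketch (the locator value is made up): in a test,
    # | ${class}= | Get Element Attribute | menu_container@class |
    # would locate the element with id or name 'menu_container' and return the
    # value of its 'class' attribute.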
def get_horizontal_position(self, locator):
"""Returns horizontal position of element identified by `locator`.
The position is returned in pixels off the left side of the page,
as an integer. Fails if a matching element is not found.
See also `Get Vertical Position`.
"""
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("Could not determine position for '%s'" % (locator))
return element.location['x']
def get_value(self, locator):
"""Returns the value attribute of element identified by `locator`.
See `introduction` for details about locating elements.
"""
return self._get_value(locator)
def get_text(self, locator):
"""Returns the text value of element identified by `locator`.
See `introduction` for details about locating elements.
"""
return self._get_text(locator)
def get_vertical_position(self, locator):
"""Returns vertical position of element identified by `locator`.
The position is returned in pixels off the top of the page,
as an integer. Fails if a matching element is not found.
See also `Get Horizontal Position`.
"""
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("Could not determine position for '%s'" % (locator))
return element.location['y']
# Public, mouse input/events
def click_element(self, locator):
"""Click element identified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Clicking element '%s'." % locator)
self._element_find(locator, True, True).click()
def click_element_at_coordinates(self, locator, xoffset, yoffset):
"""Click element identified by `locator` at x/y coordinates of the element.
        Cursor is moved to the center of the element and the x/y coordinates are
        calculated relative to that point.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Click clicking element '%s' in coordinates '%s', '%s'." % (locator, xoffset, yoffset))
element = self._element_find(locator, True, True)
#self._element_find(locator, True, True).click()
#ActionChains(self._current_browser()).move_to_element_with_offset(element, xoffset, yoffset).click().perform()
ActionChains(self._current_browser()).move_to_element(element).move_by_offset(xoffset, yoffset).click().perform()
def double_click_element(self, locator):
"""Double click element identified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Double clicking element '%s'." % locator)
element = self._element_find(locator, True, True)
ActionChains(self._current_browser()).double_click(element).perform()
def focus(self, locator):
"""Sets focus to element identified by `locator`."""
element = self._element_find(locator, True, True)
self._current_browser().execute_script("arguments[0].focus();", element)
def drag_and_drop(self, source, target):
"""Drags element identified with `source` which is a locator.
Element can be moved on top of another element with `target`
argument.
`target` is a locator of the element where the dragged object is
dropped.
Examples:
| Drag And Drop | elem1 | elem2 | # Move elem1 over elem2. |
"""
        src_elem = self._element_find(source, True, True)
        trg_elem = self._element_find(target, True, True)
ActionChains(self._current_browser()).drag_and_drop(src_elem, trg_elem).perform()
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
"""Drags element identified with `source` which is a locator.
        Element will be moved by xoffset and yoffset; each is a positive or
        negative number specifying the offset.
Examples:
        | Drag And Drop By Offset | myElem | 50 | -35 | # Move myElem 50px right and 35px up. |
"""
src_elem = self._element_find(source, True, True)
ActionChains(self._current_browser()).drag_and_drop_by_offset(src_elem, xoffset, yoffset).perform()
def mouse_down(self, locator):
"""Simulates pressing the left mouse button on the element specified by `locator`.
The element is pressed without releasing the mouse button.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
See also the more specific keywords `Mouse Down On Image` and
`Mouse Down On Link`.
"""
self._info("Simulating Mouse Down on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).click_and_hold(element).perform()
def mouse_out(self, locator):
"""Simulates moving mouse away from the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Out on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
size = element.size
offsetx = (size['width'] / 2) + 1
offsety = (size['height'] / 2) + 1
ActionChains(self._current_browser()).move_to_element(element).move_by_offset(offsetx, offsety).perform()
def mouse_over(self, locator):
"""Simulates hovering mouse over the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Over on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).move_to_element(element).perform()
def mouse_up(self, locator):
"""Simulates releasing the left mouse button on the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Up on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).release(element).perform()
def open_context_menu(self, locator):
"""Opens context menu on element identified by `locator`."""
element = self._element_find(locator, True, True)
ActionChains(self._current_browser()).context_click(element).perform()
def simulate(self, locator, event):
"""Simulates `event` on element identified by `locator`.
This keyword is useful if element has OnEvent handler that needs to be
explicitly invoked.
See `introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True)
script = """
element = arguments[0];
eventName = arguments[1];
if (document.createEventObject) { // IE
return element.fireEvent('on' + eventName, document.createEventObject());
}
var evt = document.createEvent("HTMLEvents");
evt.initEvent(eventName, true, true);
return !element.dispatchEvent(evt);
"""
self._current_browser().execute_script(script, element, event)
def press_key(self, locator, key):
"""Simulates user pressing key on element identified by `locator`.
        `key` is either a single character, or a numerical ASCII code of the key
        prefixed with '\\'.
Examples:
| Press Key | text_field | q |
| Press Key | login_button | \\13 | # ASCII code for enter key |
"""
if key.startswith('\\') and len(key) > 1:
key = self._map_ascii_key_code_to_key(int(key[1:]))
#if len(key) > 1:
# raise ValueError("Key value '%s' is invalid.", key)
element = self._element_find(locator, True, True)
#select it
element.send_keys(key)
# Public, links
def click_link(self, locator):
"""Clicks a link identified by locator.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._info("Clicking link '%s'." % locator)
link = self._element_find(locator, True, True, tag='a')
link.click()
def get_all_links(self):
"""Returns a list containing ids of all links found in current page.
If a link has no id, an empty string will be in the list instead.
"""
links = []
for anchor in self._element_find("tag=a", False, False, 'a'):
links.append(anchor.get_attribute('id'))
return links
def mouse_down_on_link(self, locator):
"""Simulates a mouse down event on a link.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True, 'link')
ActionChains(self._current_browser()).click_and_hold(element).perform()
def page_should_contain_link(self, locator, message='', loglevel='INFO'):
"""Verifies link identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, 'link', message, loglevel)
def page_should_not_contain_link(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is not found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
        Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, 'link', message, loglevel)
# Public, images
def click_image(self, locator):
"""Clicks an image found by `locator`.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._info("Clicking image '%s'." % locator)
element = self._element_find(locator, True, False, 'image')
if element is None:
            # A form may have an image as its submit trigger.
element = self._element_find(locator, True, True, 'input')
element.click()
def mouse_down_on_image(self, locator):
"""Simulates a mouse down event on an image.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True, 'image')
ActionChains(self._current_browser()).click_and_hold(element).perform()
def page_should_contain_image(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, 'image', message, loglevel)
def page_should_not_contain_image(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, 'image', message, loglevel)
# Public, xpath
def get_matching_xpath_count(self, xpath):
"""Returns number of elements matching `xpath`
If you wish to assert the number of matching elements, use
`Xpath Should Match X Times`.
"""
count = len(self._element_find("xpath=" + xpath, False, False))
return str(count)
def xpath_should_match_x_times(self, xpath, expected_xpath_count, message='', loglevel='INFO'):
"""Verifies that the page contains the given number of elements located by the given `xpath`.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
"""
actual_xpath_count = len(self._element_find("xpath=" + xpath, False, False))
if int(actual_xpath_count) != int(expected_xpath_count):
if not message:
message = "Xpath %s should have matched %s times but matched %s times"\
%(xpath, expected_xpath_count, actual_xpath_count)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page contains %s elements matching '%s'."
% (actual_xpath_count, xpath))
# Private
def _element_find(self, locator, first_only, required, tag=None):
browser = self._current_browser()
elements = self._element_finder.find(browser, locator, tag)
if required and len(elements) == 0:
raise ValueError("Element locator '" + locator + "' did not match any elements.")
if first_only:
if len(elements) == 0: return None
return elements[0]
return elements
def _frame_contains(self, locator, text):
browser = self._current_browser()
element = self._element_find(locator, True, True)
browser.switch_to_frame(element)
self._info("Searching for text from frame '%s'." % locator)
found = self._is_text_present(text)
browser.switch_to_default_content()
return found
def _get_text(self, locator):
element = self._element_find(locator, True, True)
if element is not None:
return element.text
return None
def _get_value(self, locator, tag=None):
element = self._element_find(locator, True, False, tag=tag)
return element.get_attribute('value') if element is not None else None
def _is_enabled(self, locator):
element = self._element_find(locator, True, True)
if not self._is_form_element(element):
raise AssertionError("ERROR: Element %s is not an input." % (locator))
if not element.is_enabled():
return False
read_only = element.get_attribute('readonly')
if read_only == 'readonly' or read_only == 'true':
return False
return True
def _is_text_present(self, text):
locator = "xpath=//*[contains(., %s)]" % utils.escape_xpath_value(text);
return self._is_element_present(locator)
def _is_visible(self, locator):
element = self._element_find(locator, True, False)
if element is not None:
return element.is_displayed()
return None
def _map_ascii_key_code_to_key(self, key_code):
map = {
0: Keys.NULL,
8: Keys.BACK_SPACE,
9: Keys.TAB,
10: Keys.RETURN,
13: Keys.ENTER,
24: Keys.CANCEL,
27: Keys.ESCAPE,
32: Keys.SPACE,
42: Keys.MULTIPLY,
43: Keys.ADD,
44: Keys.SEPARATOR,
45: Keys.SUBTRACT,
56: Keys.DECIMAL,
57: Keys.DIVIDE,
59: Keys.SEMICOLON,
61: Keys.EQUALS,
127: Keys.DELETE
}
key = map.get(key_code)
if key is None:
key = chr(key_code)
return key
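    # Example of the fallback behaviour (a sketch, not from the library docs):
    # code 13 maps to Keys.ENTER via the table above, while an unlisted code
    # such as 65 falls back to chr(65) == 'A'.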
def _parse_attribute_locator(self, attribute_locator):
parts = attribute_locator.rpartition('@')
if len(parts[0]) == 0:
raise ValueError("Attribute locator '%s' does not contain an element locator." % (attribute_locator))
if len(parts[2]) == 0:
raise ValueError("Attribute locator '%s' does not contain an attribute name." % (attribute_locator))
return (parts[0], parts[2])
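    # Rough illustration: _parse_attribute_locator('element_id@class') would
    # return ('element_id', 'class'); a missing element part or attribute name
    # raises ValueError as above.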
def _is_element_present(self, locator, tag=None):
        return self._element_find(locator, True, False, tag=tag) is not None
def _page_contains(self, text):
browser = self._current_browser()
browser.switch_to_default_content()
if self._is_text_present(text):
return True
subframes = self._element_find("xpath=//frame|//iframe", False, False)
self._debug('Current frame has %d subframes' % len(subframes))
for frame in subframes:
browser.switch_to_frame(frame)
found_text = self._is_text_present(text)
browser.switch_to_default_content()
if found_text:
return True
return False
def _page_should_contain_element(self, locator, tag, message, loglevel):
element_name = tag if tag is not None else 'element'
if not self._is_element_present(locator, tag):
if not message:
message = "Page should have contained %s '%s' but did not"\
% (element_name, locator)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page contains %s '%s'." % (element_name, locator))
def _page_should_not_contain_element(self, locator, tag, message, loglevel):
element_name = tag if tag is not None else 'element'
if self._is_element_present(locator, tag):
if not message:
message = "Page should not have contained %s '%s'"\
% (element_name, locator)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page does not contain %s '%s'."
% (element_name, locator))
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import ndarray as nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
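# A hedged summary of the selection logic above, with illustrative inputs:
# kvstore=None yields kv=None; kvstore='local' with num_device == 1 also yields
# kv=None (single device, single machine); kvstore='dist_sync' creates a
# distributed store; and for 'local', update_on_kvstore is switched off when
# the largest parameter holds more than 1024 * 1024 * 16 elements.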
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
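# Illustration of the batching above (a sketch): with the default
# MXNET_UPDATE_AGGREGATION_SIZE of 16, a network with 40 parameters that all
# have valid gradients is pushed and pulled in three slices of 16, 16 and 8
# keys.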
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updates[k].append((index*num_device+k, g, w))
    for dev_updates in updates:
        # skip device slots that collected no (index, grad, weight) entries,
        # e.g. when every grad_list[0] was None
        if dev_updates:
            i, w, g = zip(*dev_updates)
            updater(i, w, g)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
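# A small sketch of the accepted shapes (the callback names are hypothetical):
# _multiple_callbacks(None) is a no-op, _multiple_callbacks(cb, param) calls
# cb(param) once, and _multiple_callbacks([cb1, cb2], param) calls both in
# order.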
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed and get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
        Whether or not to perform weight updating on the kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
    - This function will update the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
else:
kvstore.set_optimizer(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
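# Minimal checkpoint round trip built on the two helpers above (the symbol and the
# parameter dicts are placeholders for user-defined values; paths are illustrative):
#
#   save_checkpoint('my_model', 10, net_symbol, arg_params, aux_params)
#   # writes my_model-symbol.json and my_model-0010.params
#   sym_loaded, args_loaded, auxs_loaded = load_checkpoint('my_model', 10)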
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol
        to be passed via ``aux_params`` and ``arg_params``.
        If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
        contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Goes through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
        eval_metric : metric.EvalMetric or str
            The metric for calculating the score.
        num_batch : int or None
            The number of batches to run. Goes through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; there is often no need to change it for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
        work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
        - 'local': multiple devices on a single machine; automatically chooses the best type.
        - 'dist_sync': multiple machines communicating via BSP.
        - 'dist_async': multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
        elif isinstance(self.optimizer, opt.Optimizer):
            optimizer = self.optimizer
        else:
            raise TypeError('optimizer must be a str or an opt.Optimizer instance')
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - Symbol will be loaded from ``prefix-symbol.json``.
        - Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
            be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
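# Sketch of end-to-end usage of the (deprecated) FeedForward API; `net`, `train_iter`
# and `val_iter` are placeholders for a user-defined symbol and data iterators, and
# `learning_rate` is forwarded to the optimizer via **kwargs:
#
#   model = FeedForward(net, ctx=[cpu()], num_epoch=10, optimizer='sgd',
#                       learning_rate=0.1)
#   model.fit(train_iter, eval_data=val_iter, eval_metric='acc')
#   model.save('my_model')          # -> my_model-symbol.json / my_model-0010.params
#   probs = model.predict(val_iter)
#   acc = model.score(val_iter, eval_metric='acc')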
|
|
'''
Created on Apr 8, 2014
@author: cmills
'''
from tasr_test import TASRTestCase
from tasr.headers import SchemaHeaderBot, SubjectHeaderBot
import unittest
from webtest import TestApp
import StringIO
import tasr.app
APP = tasr.app.TASR_APP
APP.set_config_mode('local')
class TestTASRTopicApp(TASRTestCase):
    '''These tests check that the TASR native REST API (including the get-by-ID
    calls) is working as expected. This does not check the S+V API calls.
'''
def setUp(self):
self.event_type = "gold"
fix_rel_path = "schemas/%s.avsc" % (self.event_type)
self.avsc_file = TASRTestCase.get_fixture_file(fix_rel_path, "r")
self.schema_str = self.avsc_file.read()
self.tasr_app = TestApp(APP)
self.url_prefix = 'http://%s:%s/tasr' % (APP.config.host,
APP.config.port)
self.topic_url = '%s/topic/%s' % (self.url_prefix, self.event_type)
self.content_type = 'application/json; charset=utf8'
# clear out all the keys before beginning -- careful!
APP.ASR.redis.flushdb()
def tearDown(self):
# this clears out redis after each test -- careful!
APP.ASR.redis.flushdb()
def abort_diff_status(self, resp, code):
self.assertEqual(code, resp.status_code,
u'Non-%s status code: %s' % (code, resp.status_code))
def register_schema(self, schema_str, expect_errors=False):
return self.tasr_app.request(self.topic_url, method='PUT',
expect_errors=expect_errors,
content_type=self.content_type,
body=schema_str)
def test_get_all_topics(self):
'''GET /tasr/topic - as expected'''
# reg two vers for target topic and one for an alt topic
self.register_schema(self.schema_str)
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.alt', 1)
self.register_schema(schema_str_2)
alt_topic = 'bob'
alt_url = '%s/topic/%s' % (self.url_prefix, alt_topic)
self.tasr_app.request(alt_url, method='PUT',
content_type=self.content_type,
body=self.schema_str)
# now get all with versions and check the headers
url = "%s/topic" % (self.url_prefix)
resp = self.tasr_app.request(url, method='GET')
self.abort_diff_status(resp, 200)
# we expect a list of SubjectMetadata objects here
meta_dict = SubjectHeaderBot.extract_metadata(resp)
self.assertEqual(2, meta_dict[self.event_type].current_version,
'bad ver')
self.assertEqual(1, meta_dict[alt_topic].current_version, 'bad ver')
# lastly check the body
buff = StringIO.StringIO(resp.body)
group_names = []
for topic_line in buff:
group_names.append(topic_line.strip())
buff.close()
self.assertListEqual(sorted(group_names), sorted(meta_dict.keys()),
'Expected group_names in body to match headers.')
def test_register_schema(self):
'''PUT /tasr/topic/<topic name> - as expected'''
resp = self.register_schema(self.schema_str)
self.abort_diff_status(resp, 201)
smeta = SchemaHeaderBot.extract_metadata(resp)
self.assertIn(self.event_type, smeta.group_names, 'event_type missing')
self.assertEqual(1, smeta.group_version(self.event_type), 'bad ver')
self.assertTrue(smeta.group_timestamp(self.event_type), 'missing ts')
def test_reg_fail_on_empty_schema(self):
'''PUT /tasr/topic/<topic name> - empty schema'''
resp = self.register_schema(None, expect_errors=True)
self.abort_diff_status(resp, 400)
def test_reg_fail_on_invalid_schema(self):
'''PUT /tasr/topic/<topic name> - bad schema'''
bad_schema_str = "%s }" % self.schema_str
resp = self.register_schema(bad_schema_str, expect_errors=True)
self.abort_diff_status(resp, 400)
def test_reg_fail_on_bad_content_type(self):
'''PUT /tasr/topic/<topic name> - bad Content-Type'''
resp = self.tasr_app.request(self.topic_url, method='PUT',
content_type='text/plain; charset=utf8',
expect_errors=True,
body=self.schema_str)
self.abort_diff_status(resp, 406)
def test_reg_and_rereg(self):
'''PUT /tasr/topic/<topic name> - multiple calls, same schema'''
resp = self.register_schema(self.schema_str)
self.abort_diff_status(resp, 201)
smeta = SchemaHeaderBot.extract_metadata(resp)
self.assertEqual(1, smeta.group_version(self.event_type), 'bad ver')
# on the re-registration, we should get the same version back
resp2 = self.register_schema(self.schema_str)
self.abort_diff_status(resp2, 200)
smeta2 = SchemaHeaderBot.extract_metadata(resp2)
self.assertEqual(1, smeta2.group_version(self.event_type),
'Re-reg produced a different group version.')
def test_multi_topic_reg(self):
'''PUT /tasr/topic/<topic name> - multiple group_names, same schema'''
put_resp = self.register_schema(self.schema_str)
self.abort_diff_status(put_resp, 201)
smeta = SchemaHeaderBot.extract_metadata(put_resp)
self.assertEqual(1, smeta.group_version(self.event_type), 'bad ver')
alt_topic = 'bob'
alt_url = '%s/topic/%s' % (self.url_prefix, alt_topic)
put_resp2 = self.tasr_app.request(alt_url, method='PUT',
content_type=self.content_type,
body=self.schema_str)
self.abort_diff_status(put_resp2, 201)
smeta2 = SchemaHeaderBot.extract_metadata(put_resp2)
self.assertEqual(1, smeta2.group_version(alt_topic), 'bad ver')
# getting by ID gives us all topic associations in headers
id_url = "%s/id/%s" % (self.url_prefix, smeta.sha256_id)
get_resp = self.tasr_app.request(id_url, method='GET')
smeta3 = SchemaHeaderBot.extract_metadata(get_resp)
self.assertEqual(1, smeta3.group_version(self.event_type), 'bad ver')
self.assertEqual(1, smeta3.group_version(alt_topic), 'bad ver')
def test_get_latest_1(self):
'''GET /tasr/topic/<topic name> - as expected'''
put_resp = self.register_schema(self.schema_str)
# the canonicalized form returned has normalized whitespace
canonicalized_schema_str = put_resp.body
# now pull it back with a GET
get_resp = self.tasr_app.request(self.topic_url, method='GET')
self.abort_diff_status(get_resp, 200)
smeta = SchemaHeaderBot.extract_metadata(get_resp)
self.assertEqual(1, smeta.group_version(self.event_type), 'bad ver')
self.assertEqual(canonicalized_schema_str, get_resp.body,
u'Unexpected body: %s' % get_resp.body)
def test_get_latest_2(self):
'''GET /tasr/topic/<topic name>/latest - as expected'''
put_resp = self.register_schema(self.schema_str)
# the canonicalized form returned has normalized whitespace
canonicalized_schema_str = put_resp.body
# now pull it back with a GET
get_url = '%s/latest' % self.topic_url
get_resp = self.tasr_app.request(get_url, method='GET')
self.abort_diff_status(get_resp, 200)
smeta = SchemaHeaderBot.extract_metadata(get_resp)
self.assertEqual(1, smeta.group_version(self.event_type), 'bad ver')
self.assertEqual(canonicalized_schema_str, get_resp.body,
u'Unexpected body: %s' % get_resp.body)
def test_reg_50_and_get_by_version(self):
'''GET /tasr/topic/<topic name>/version/<version> - as expected'''
schemas = []
# add a bunch of versions for our topic
for v in range(1, 50):
ver_schema_str = self.schema_str.replace('tagged.events',
'tagged.events.%s' % v, 1)
put_resp = self.register_schema(ver_schema_str)
# the canonicalized form returned has normalized whitespace
canonicalized_schema_str = put_resp.body
schemas.append(canonicalized_schema_str)
self.abort_diff_status(put_resp, 201)
# step through and request each version by version number
for v in range(1, 50):
query = "%s/version/%s" % (self.topic_url, v)
get_resp = self.tasr_app.request(query, method='GET')
self.abort_diff_status(get_resp, 200)
self.assertEqual(schemas[v - 1], get_resp.body,
u'Unexpected body: %s' % get_resp.body)
def test_get_for_topic_and_version_fail_on_bad_version(self):
'''GET /tasr/topic/<topic name>/version/<version> - fail on bad ver'''
put_resp = self.register_schema(self.schema_str)
smeta = SchemaHeaderBot.extract_metadata(put_resp)
self.assertEqual(1, smeta.group_version(self.event_type), 'bad ver')
bad_ver = smeta.group_version(self.event_type) + 1
url = "%s/version/%s" % (self.topic_url, bad_ver)
get_resp = self.tasr_app.request(url, method='GET', expect_errors=True)
self.abort_diff_status(get_resp, 404)
def test_get_for_stale_version(self):
'''GET /tasr/topic/<topic name>/version/<version> - 1 schema, 2 vers'''
put_resp = self.register_schema(self.schema_str)
# the canonicalized form returned has normalized whitespace
canonicalized_schema_str = put_resp.body
self.abort_diff_status(put_resp, 201)
schema_str_2 = self.schema_str.replace('tagged.events',
'tagged.events.alt', 1)
put_resp2 = self.register_schema(schema_str_2)
self.abort_diff_status(put_resp2, 201)
put_resp3 = self.register_schema(self.schema_str)
smeta = SchemaHeaderBot.extract_metadata(put_resp3)
self.assertEqual(3, smeta.group_version(self.event_type), 'bad ver')
# now get version 1 -- should be same schema, but diff ver in headers
url = "%s/version/%s" % (self.topic_url, 1)
get_resp = self.tasr_app.request(url, method='GET', expect_errors=True)
self.abort_diff_status(get_resp, 200)
self.assertEqual(canonicalized_schema_str, get_resp.body,
u'Unexpected body: %s' % get_resp.body)
smeta = SchemaHeaderBot.extract_metadata(get_resp)
self.assertEqual(1, smeta.group_version(self.event_type), 'bad ver')
if __name__ == "__main__":
SUITE = unittest.TestLoader().loadTestsFromTestCase(TestTASRTopicApp)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
|
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# processing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [
'Process', 'currentProcess', 'activeChildren'
]
#
# Imports
#
import os
import sys
import time
import signal
import atexit
import weakref
import copy_reg
import itertools
#
# Public functions
#
def currentProcess():
'''
Return process object representing the current process
'''
return _current_process
def activeChildren():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
'''
Purge `_children` of dead processes
'''
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
    The class is analogous to `threading.Thread`
'''
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
counter = _current_process._counter.next()
self._identity = _current_process._identity + (counter,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._parent_pid = os.getpid()
self._popen = None
self._exiting = False
self._target = target
self._args = tuple(args)
self._kwargs = kwargs.copy()
self._name = name or 'Process-' + ':'.join(map(str, self._identity))
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
from processing.forking import Popen
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
_cleanup()
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends `SIGTERM` signal or uses `TerminateProcess()`
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
if timeout == 0:
res = self._popen.poll()
elif timeout is None:
res = self._popen.wait()
else:
res = self._popen.waitTimeout(timeout)
if res is not None:
_current_process._children.discard(self)
def isAlive(self):
'''
Return whether child process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
def getName(self):
'''
Return name of process
'''
return self._name
def setName(self, name):
'''
Set name of process
'''
assert type(name) is str, 'name must be a string'
self._name = name
def isDaemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
def setDaemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
def getAuthKey(self):
'''
Return authorization key of process
'''
return self._authkey
def setAuthKey(self, authkey):
'''
Set authorization key of process
'''
assert type(authkey) is str, 'value must be a string'
self._authkey = authkey
def getExitCode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
        if self._popen is None:
            return None
return self._popen.poll()
def getPid(self):
'''
Return PID of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
assert self._parent_pid == os.getpid(), 'not a child process'
return self._popen and self._popen.pid
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.getExitCode()
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from processing.finalize import _registry
from processing.logger import info
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
sys.stdin.close()
_registry.clear()
_current_process = self
_runAfterForkers()
info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
_exitFunction()
except SystemExit, e:
if not e.args:
exitcode = 1
elif type(e.args[0]) is int:
exitcode = e.args[0]
else:
print >>sys.stderr, e.args[0]
exitcode = 1
except:
exitcode = 1
import traceback
print >>sys.stderr, 'Process %s:' % self.getName()
traceback.print_exc()
info('process exiting with exitcode %d' % exitcode)
return exitcode
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = ''.join('%02x' % ord(c) for c in os.urandom(16))
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
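# Illustrative mapping: on Linux a child killed by SIGTERM has exit code -15, so
# _exitcode_to_name[-15] == 'SIGTERM' and repr(process) reports 'stopped[SIGTERM]'.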
#
# Make bound and unbound instance methods and class methods picklable
#
def _reduceMethod(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(type(_current_process.start), _reduceMethod)
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterForkerId = itertools.count().next
def _runAfterForkers():
# execute in order of registration
for (index, ident, func), obj in sorted(_afterfork_registry.items()):
func(obj)
def _registerAfterFork(obj, func):
_afterfork_registry[(_afterForkerId(), id(obj), func)] = obj
#
# Clean up on exit
#
def _exitFunction():
from processing.finalize import _runFinalizers
from processing.logger import info
_current_process._exiting = True
info('running all "atexit" finalizers with priority >= 0')
_runFinalizers(0)
for p in activeChildren():
if p._daemonic:
info('calling `terminate()` for daemon %s', p.getName())
p._popen.terminate()
for p in activeChildren():
info('calling `join()` for process %s', p.getName())
p.join()
info('running the remaining "atexit" finalizers')
_runFinalizers()
atexit.register(_exitFunction)
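# Illustrative usage of this Process class (mirrors threading.Thread; the worker
# function and its argument are examples, not part of this module):
#
#   def worker(name):
#       print 'hello from', name
#
#   p = Process(target=worker, args=('child',))
#   p.start()
#   p.join()
#   print p.getExitCode()   # 0 on a clean exit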
|
|
import typing
import binascii
import themis.cbuild
import themis.auto
import themis.channel
import themis.codegen
import themis.codehelpers
import themis.joystick
import themis.pwm
import themis.timers
Mode = themis.channel.Discrete("DISABLED AUTONOMOUS TELEOP TESTING")
JOYSTICK_NUM = 6
AXIS_NUM = 12
MAX_BUTTON_NUM = 32
PWM_NUM = 20
PCM_NUM = 63
SOLENOID_NUM = 8
GPIO_NUM = 26
INTERRUPT_NUM = 8
class RoboRIO:
def __init__(self): # we have the required accessors mimic the actual route to a device. good idea? not sure.
themis.codegen.add_init_call("ds_init", themis.codegen.InitPhase.PHASE_PREINIT_IO)
self.driver_station = DriverStation()
self.can = CAN()
self.pwm = PWM()
self.gpio = GPIO()
def get_mode(self) -> themis.channel.DiscreteInput:
return self.driver_station.get_mode()
def is_mode(self, mode: Mode) -> themis.channel.BooleanInput:
return self.get_mode().is_value(mode)
def run_during_auto(self, autonomous: themis.auto.AutonomousType):
should_run = self.is_mode(Mode.AUTONOMOUS)
themis.auto.run_autonomous_while(should_run, autonomous)
# TODO: use appropriate exceptions for argument ranges instead of assertions
class GPIO:
UNASSIGNED = 0
INPUT = 1
OUTPUT = 2
def __init__(self):
self._gpio_assignments = [GPIO.UNASSIGNED] * GPIO_NUM
self._next_interrupt = 0
self._poll_event = themis.timers.ticker(millis=20)
def _alloc_interrupt(self) -> int:
if self._next_interrupt >= INTERRUPT_NUM:
raise Exception("Too many interrupts - can only allocate %d GPIO inputs with interrupts" % INTERRUPT_NUM)
last = self._next_interrupt
self._next_interrupt += 1
return last
def input(self, gpio_pin, interrupt=False) -> themis.channel.BooleanInput:
assert self._gpio_assignments[gpio_pin] == GPIO.UNASSIGNED
self._gpio_assignments[gpio_pin] = GPIO.INPUT
if interrupt:
interrupt_id = self._alloc_interrupt()
themis.codegen.add_init_call("gpio_init_input_interrupt",
themis.codegen.InitPhase.PHASE_INIT_IO, gpio_pin, interrupt_id)
update_out, update_in = themis.channel.event_cell()
themis.codegen.add_init_call("gpio_start_interrupt", themis.codegen.InitPhase.PHASE_BEGIN,
gpio_pin, interrupt_id, update_out)
return themis.codehelpers.poll_boolean(update_in, "gpio_poll_input", (gpio_pin,), False)
else:
themis.codegen.add_init_call("gpio_init_input_poll",
themis.codegen.InitPhase.PHASE_INIT_IO, gpio_pin)
return themis.codehelpers.poll_boolean(self._poll_event, "gpio_poll_input", (gpio_pin,), False)
class PWM:
def __init__(self):
pass
def _frequency_to_squelch(self, freq: float) -> int:
assert freq > 0
if freq >= 133:
return 0 # no squelching: 198 Hz
elif freq >= 67:
return 1 # half squelching: 99 Hz
else:
return 3 # full squelching: 49.5 Hz
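    # Worked example: a controller asking for 200 Hz gets squelch 0 (runs at ~198 Hz),
    # 100 Hz maps to squelch 1 (~99 Hz), and 50 Hz maps to squelch 3 (~49.5 Hz).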
def talon_sr(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.TALON_SR)
def jaguar(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.JAGUAR)
def victor_old(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.VICTOR_OLD)
def servo(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.SERVO, latch_zero=True)
def victor_sp(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.VICTOR_SP)
def spark(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.SPARK)
def sd540(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.SD540)
def talon_srx(self, pwm_id: int) -> themis.channel.FloatOutput:
return self.pwm_controller(pwm_id, themis.pwm.TALON_SRX)
def pwm_controller(self, pwm_id: int, specs: themis.pwm.SpeedControlSpecs,
latch_zero: bool = False) -> themis.channel.FloatOutput:
return themis.pwm.filter_to(specs, self.pwm_raw(pwm_id, specs.frequency_hz, latch_zero=latch_zero))
def pwm_raw(self, pwm_id: int, frequency: float, latch_zero: bool = False) -> themis.channel.FloatOutput:
assert 0 <= pwm_id < PWM_NUM
squelch = self._frequency_to_squelch(frequency)
themis.codegen.add_init_call("pwm_init", themis.codegen.InitPhase.PHASE_INIT_IO, pwm_id, squelch, latch_zero)
return themis.codehelpers.push_float("pwm_update", extra_args=(pwm_id,))
class CAN: # TODO: implement!
def __init__(self):
# TODO: support multiple PCMs
self.pcm = PCM(0)
class PCM:
def __init__(self, pcm_id):
assert 0 <= pcm_id < PCM_NUM
self._id = pcm_id
def solenoid(self, solenoid_id):
assert 0 <= solenoid_id < SOLENOID_NUM
themis.codegen.add_init_call("solenoid_init", themis.codegen.InitPhase.PHASE_INIT_IO, self._id,
solenoid_id)
return themis.codehelpers.push_boolean("solenoid_update", extra_args=(self._id, solenoid_id))
class DriverStation:
def __init__(self):
update_out, self._update = themis.channel.event_cell()
themis.codegen.add_init_call("ds_begin", themis.codegen.InitPhase.PHASE_BEGIN, update_out)
self.joysticks = [Joystick(i, self._update) for i in range(JOYSTICK_NUM)]
self._get_mode = None
def get_mode(self) -> themis.channel.DiscreteInput:
if self._get_mode is None:
self._get_mode = themis.codehelpers.poll_discrete(self._update, "get_robot_mode", (), Mode.DISABLED, Mode)
return self._get_mode
def joystick(self, i):
return self.joysticks[i - 1]
class Joystick(themis.joystick.Joystick):
def __init__(self, i: int, event_update_joysticks):
self._index = i
self._update = event_update_joysticks
self._axes = [None] * AXIS_NUM
self._buttons = [None] * MAX_BUTTON_NUM
def _make_axis(self, i) -> themis.channel.FloatInput:
return themis.codehelpers.poll_float(self._update, "get_joystick_axis", (self._index, i), 0)
def _make_button(self, i) -> themis.channel.BooleanInput:
return themis.codehelpers.poll_boolean(self._update, "get_joystick_button", (self._index, i), False)
def axis(self, axis_num) -> themis.channel.FloatInput:
axis_num -= 1
if self._axes[axis_num] is None:
self._axes[axis_num] = self._make_axis(axis_num)
return self._axes[axis_num]
def button(self, button_num) -> themis.channel.BooleanInput:
button_num -= 1
if self._buttons[button_num] is None:
self._buttons[button_num] = self._make_button(button_num)
return self._buttons[button_num]
def deploy_roboRIO(team_number: int, code):
print("=============================================")
print("Would deploy code to", team_number)
# TODO: don't output to this kind of fixed path
with open("/tmp/output.elf", "wb") as fout:
fout.write(code)
print("/tmp/output.elf")
print("=============================================")
GCC_PREFIX = "arm-frc-linux-gnueabi-"
# note: these flags are duplicated in themis-frc-hal/CMakeLists.txt
C_FLAGS = "-Wformat=2 -Wall -Wextra -Werror -pedantic -Wno-psabi -Wno-unused-parameter -fPIC -Os -g0 -rdynamic " \
"-std=c11 -D_POSIX_C_SOURCE=199309L"
def compile_roboRIO(c_code):
return themis.cbuild.build_program(c_code, "themis.h", "libthemis-frc.so", GCC_PREFIX, C_FLAGS, __name__)
def robot(team_number: int, robot_constructor: typing.Callable[[RoboRIO], None]):
with themis.codegen.GenerationContext().enter():
roboRIO = RoboRIO()
robot_constructor(roboRIO)
compiled_code = compile_roboRIO(themis.codegen.generate_code())
deploy_roboRIO(team_number, compiled_code)
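# Illustrative robot program built on the API above (team number and wiring are examples;
# the FloatInput-to-FloatOutput connection call is omitted because it depends on the
# themis.channel API and is not shown in this module):
#
#   def my_robot(rio: RoboRIO):
#       drive_motor = rio.pwm.talon_sr(0)                     # FloatOutput on PWM channel 0
#       throttle = rio.driver_station.joystick(1).axis(2)     # FloatInput
#       # connect throttle to drive_motor using the themis.channel API of your choice
#
#   robot(1234, my_robot)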
|
|
'''
Created on December 25, 2016
Yes, I created this on Christmas day.
This is my gift to you.
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
import importlib
import datetime
import utilities.utilities as utilities
import utilities.analytics as analytics
from intelligence.intelligence import Intelligence
# Sunrise identifier for timers
SUNRISE = "sunrise"
# Sunset identifier for timers
SUNSET = "sunset"
class LocationDaylightMicroservice(Intelligence):
"""
Determine sunrise and sunset times for the location
"""
def __init__(self, botengine, parent):
"""
Instantiate this object
:param parent: Parent object, either a location or a device object.
"""
Intelligence.__init__(self, botengine, parent)
if self.parent.latitude is not None and self.parent.longitude is not None:
self._set_sunrise_sunset_alarm(botengine)
# Initialize the 'is_daylight' class variable in the Location object.
self.parent.is_daylight = self.is_daylight(botengine)
def initialize(self, botengine):
"""
Initialize
:param botengine: BotEngine environment
"""
if not self.is_timer_running(botengine) and not botengine.is_executing_timer():
if self.parent.latitude is not None and self.parent.longitude is not None:
self._set_sunrise_sunset_alarm(botengine)
return
def destroy(self, botengine):
"""
This device or object is getting permanently deleted - it is no longer in the user's account.
:param botengine: BotEngine environment
"""
return
def mode_updated(self, botengine, current_mode):
"""
Mode was updated
:param botengine: BotEngine environment
:param current_mode: Current mode
"""
return
def device_measurements_updated(self, botengine, device_object):
"""
Device was updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
return
def device_metadata_updated(self, botengine, device_object):
"""
Evaluate a device that is new or whose goal/scenario was recently updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
return
def device_alert(self, botengine, device_object, alert_type, alert_params):
"""
Device sent an alert.
When a device disconnects, it will send an alert like this: [{u'alertType': u'status', u'params': [{u'name': u'deviceStatus', u'value': u'2'}], u'deviceId': u'eb10e80a006f0d00'}]
When a device reconnects, it will send an alert like this: [{u'alertType': u'on', u'deviceId': u'eb10e80a006f0d00'}]
:param botengine: BotEngine environment
:param device_object: Device object that sent the alert
:param alert_type: Type of alert
"""
return
def device_deleted(self, botengine, device_object):
"""
Device is getting deleted
:param botengine: BotEngine environment
:param device_object: Device object that is getting deleted
"""
return
def question_answered(self, botengine, question):
"""
The user answered a question
:param botengine: BotEngine environment
:param question: Question object
"""
return
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
"""
return
def timer_fired(self, botengine, argument):
"""
The bot's intelligence timer fired
:param botengine: Current botengine environment
:param argument: Argument applied when setting the timer
"""
if argument == SUNRISE:
botengine.get_logger().info("SUNRISE timer fired")
self.parent.narrate(botengine,
title = _("Sunrise"),
description = _("It is sunrise at '{}'.").format(self.parent.get_location_name(botengine)),
priority = botengine.NARRATIVE_PRIORITY_DEBUG,
icon = 'sunrise'
)
analytics.track(botengine, self.parent, 'sunrise')
self.parent.is_daylight = True
self.parent.distribute_datastream_message(botengine, "sunrise_fired", None, internal=True, external=False)
elif argument == SUNSET:
botengine.get_logger().info("SUNSET timer fired")
self.parent.narrate(botengine,
title = _("Sunset"),
description = _("It is sunset at '{}'.").format(self.parent.get_location_name(botengine)),
priority = botengine.NARRATIVE_PRIORITY_DEBUG,
icon = 'sunset'
)
analytics.track(botengine, self.parent, 'sunset')
self.parent.is_daylight = False
self.parent.distribute_datastream_message(botengine, "sunset_fired", None, internal=True, external=False)
self._set_sunrise_sunset_alarm(botengine)
def coordinates_updated(self, botengine, latitude, longitude):
"""
Approximate coordinates of the parent proxy device object have been updated
:param latitude: Latitude
:param longitude: Longitude
"""
botengine.get_logger().info("location_daylight_microservice: Lat/Long updated - recalculating sunrise/sunset times")
self._set_sunrise_sunset_alarm(botengine)
#===========================================================================
# Sunlight
#===========================================================================
def is_daylight(self, botengine):
"""
Is it daylight outside?
:param botengine: BotEngine environment
:return: True if we think it's daytime at this location
"""
next_sunrise_ms = self.next_sunrise_timestamp_ms(botengine)
next_sunset_ms = self.next_sunset_timestamp_ms(botengine)
return next_sunset_ms < next_sunrise_ms
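    # Worked example: at 14:00 local time the next sunset (e.g. 19:40 today) precedes the
    # next sunrise (e.g. 06:10 tomorrow), so the comparison above returns True; after dark
    # the ordering flips and it returns False.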
def next_sunrise_timestamp_ms(self, botengine):
"""
:param botengine: BotEngine environment
:return: The next sunrise timestamp in ms
"""
try:
ephem = importlib.import_module("ephem")
except ImportError:
ephem = None
if self.parent.longitude is None or self.parent.latitude is None or ephem is None:
# Ya, we don't have any coordinate information. Call it 8 AM.
dt = self.parent.get_local_datetime(botengine).replace(hour=8)
now = datetime.datetime.now(dt.tzinfo)
if dt < now:
dt = dt + datetime.timedelta(hours=24)
return int(dt.strftime('%s')) * 1000
o = ephem.Observer()
o.lat = str(self.parent.latitude)
o.long = str(self.parent.longitude)
dt = ephem.localtime(o.next_rising(ephem.Sun()))
return int(dt.strftime('%s')) * 1000
def next_sunset_timestamp_ms(self, botengine):
"""
:param botengine: BotEngine environment
:return: The next sunset timestamp in ms
"""
try:
ephem = importlib.import_module("ephem")
except ImportError:
ephem = None
if self.parent.longitude is None or self.parent.latitude is None or ephem is None:
# We don't have any coordinate information. Call it 8 PM.
dt = self.parent.get_local_datetime(botengine).replace(hour=20)
now = datetime.datetime.now(dt.tzinfo)
if dt < now:
dt = dt + datetime.timedelta(hours=24)
return int(dt.strftime('%s')) * 1000
o = ephem.Observer()
o.lat = str(self.parent.latitude)
o.long = str(self.parent.longitude)
dt = ephem.localtime(o.next_setting(ephem.Sun()))
return int(dt.strftime('%s')) * 1000
def _set_sunrise_sunset_alarm(self, botengine):
"""
Internal method to reset the sunrise / sunset alarm
:param botengine:
:return:
"""
self.cancel_timers(botengine)
sunset_timestamp_ms = self.next_sunset_timestamp_ms(botengine)
sunrise_timestamp_ms = self.next_sunrise_timestamp_ms(botengine)
# We're getting double sunrise and double sunset events, and I believe it's from the ephem library not knowing that sunrise/sunset is right now.
# So we'll check to see if the sunrise and/or sunset happened before now, and then adjust it by 24 hours.
if sunrise_timestamp_ms - (utilities.ONE_MINUTE_MS * 5) < botengine.get_timestamp():
sunrise_timestamp_ms += utilities.ONE_DAY_MS
if sunset_timestamp_ms - (utilities.ONE_MINUTE_MS * 5) < botengine.get_timestamp():
sunset_timestamp_ms += utilities.ONE_DAY_MS
self.parent.update_location_properties(botengine, {
'sunset_ms': sunset_timestamp_ms,
'sunrise_ms': sunrise_timestamp_ms,
'latitude': self.parent.latitude,
'longitude': self.parent.longitude,
'timezone': self.parent.get_local_timezone_string(botengine)
})
if sunrise_timestamp_ms < sunset_timestamp_ms:
# Sunrise is next
botengine.get_logger().info("Location: Setting sunrise alarm for " + str(sunrise_timestamp_ms))
self.set_alarm(botengine, sunrise_timestamp_ms, argument=SUNRISE)
else:
# Sunset is next
botengine.get_logger().info("Location: Setting sunset alarm for " + str(sunset_timestamp_ms))
self.set_alarm(botengine, sunset_timestamp_ms, argument=SUNSET)
return
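# Illustrative sketch of the PyEphem calls used above (assumes the optional `ephem`
# package is installed; the coordinates are examples):
#
#   import ephem
#   o = ephem.Observer()
#   o.lat = '37.77'                       # PyEphem expects degree strings
#   o.long = '-122.42'
#   next_rise = ephem.localtime(o.next_rising(ephem.Sun()))
#   next_rise_ms = int(next_rise.strftime('%s')) * 1000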
|
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import print_function
import os, shutil
import numpy
import random
from random import shuffle
def saveFile(path, content):
with open(path, 'a') as out:
out.write(content + '\n')
def generate_qualification(user_dict, quality_file, promotion_file, quality_rate):
    # TODO: if the quality file exists, read it; otherwise generate it.
    # (The current code reads an existing file but then regenerates it unconditionally;
    # see the sketch after this function for the intended read-or-generate behaviour.)
quality_dict = {}
if os.path.isfile(quality_file):
with open(quality_file, 'r') as qf:
lines = qf.readlines()
for line in lines:
info = line.split('\t')
score = info[1].strip()
quality_dict[info[0]] = float(score)
with open(promotion_file, "w") as pf:
with open(quality_file,"w") as qf:
for user in user_dict.keys():
if random.random()<quality_rate:
quality_dict[user] = 1
print('%s\t1'%(user),file=qf)
print('%s\t1'%(user),file=pf)
else:
quality_dict[user] = 0
print('%s\t0'%(user),file=qf)
print('%s\t0'%(user),file=pf)
return quality_dict
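# Hedged sketch (not part of the original notebook): the read-or-generate
# behaviour described by the TODO in generate_qualification, i.e. reuse an
# existing quality file instead of regenerating it on every run. The promotion
# file handling is omitted here for brevity.
def _load_or_generate_quality(user_dict, quality_file, quality_rate):
    quality_dict = {}
    if os.path.isfile(quality_file):
        with open(quality_file, 'r') as qf:
            for line in qf:
                info = line.split('\t')
                quality_dict[info[0]] = float(info[1].strip())
        return quality_dict
    with open(quality_file, 'w') as qf:
        for user in user_dict.keys():
            quality_dict[user] = 1 if random.random() < quality_rate else 0
            print('%s\t%d' % (user, quality_dict[user]), file=qf)
    return quality_dict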
def read_user_data(user_file):
user_dict = {}
with open(user_file, "r") as uf:
lines = uf.readlines()
for line in lines:
info = line.split('\t')
user_dict[info[0]] = info[1].strip()
return user_dict
def read_manager_data(manager_file):
manager_dict = {}
with open(manager_file, 'r') as mf:
lines= mf.readlines()
for line in lines:
info = line.split('\t')
user1 = info[0].strip()
user2 = info[1].strip()
if user2 in manager_dict.keys():
manager_dict[user2].append(user1)
else:
manager_dict[user2] = [user1]
return manager_dict
def generate_opinion(user_dict, quality_dict, manager_dict,
                     opinion_equal_mp, opinion_equal_mn,
                     opinion_notequal_mp, opinion_notequal_mn,
                     opinion_equal_p, opinion_equal_n,
                     opinion_notequal_p, opinion_notequal_n, opinion_file):
    # Probability that user1 expresses a positive opinion of user2, keyed by
    # (user1 manages user2, user2 has quality 1, labels match).
    thresholds = {
        (True, True, True): opinion_equal_mp,
        (True, True, False): opinion_notequal_mp,
        (True, False, True): opinion_equal_mn,
        (True, False, False): opinion_notequal_mn,
        (False, True, True): opinion_equal_p,
        (False, True, False): opinion_notequal_p,
        (False, False, True): opinion_equal_n,
        (False, False, False): opinion_notequal_n,
    }
    opinion_dict = {}
    with open(opinion_file, 'w') as ff:
        for user1, label1 in user_dict.items():
            for user2, label2 in user_dict.items():
                if user1 == user2:
                    continue
                is_manager = user1 in manager_dict.get(user2, [])
                high_quality = quality_dict[user2] == 1
                threshold = thresholds[(is_manager, high_quality, label1 == label2)]
                opinion = 1 if random.random() < threshold else 0
                print('%s\t%s\t%d' % (user1, user2, opinion), file=ff)
                opinion_dict[(user1, user2)] = opinion
    return opinion_dict
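# Hedged usage sketch (not part of the original notebook): a tiny, deterministic
# exercise of generate_opinion. The user ids, file name, and probabilities below
# are made up for illustration; thresholds of 0.0/1.0 make the outcomes certain.
def _toy_opinion_example():
    toy_users = {'u1': 'A', 'u2': 'B'}
    toy_quality = {'u1': 1, 'u2': 0}
    toy_managers = {'u2': ['u1']}  # u1 manages u2
    opinions = generate_opinion(
        toy_users, toy_quality, toy_managers,
        opinion_equal_mp=1.0, opinion_equal_mn=0.0,
        opinion_notequal_mp=1.0, opinion_notequal_mn=0.0,
        opinion_equal_p=1.0, opinion_equal_n=0.0,
        opinion_notequal_p=1.0, opinion_notequal_n=0.0,
        opinion_file='toy_opinion.txt')
    # u1 -> u2: u1 manages u2, u2 has quality 0, labels differ -> threshold 0.0 -> opinion 0
    assert opinions[('u1', 'u2')] == 0
    # u2 -> u1: not a manager, u1 has quality 1, labels differ -> threshold 1.0 -> opinion 1
    assert opinions[('u2', 'u1')] == 1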
def generate_submission(user_file, ingroup_file, manager_file, quality_file, opinion_file, submit_file, promotion_file, submission_rate_A, submission_rate_B, opinion_equal_p, opinion_equal_n, opinion_equal_mn, opinion_equal_mp, opinion_notequal_p, opinion_notequal_n, opinion_notequal_mp, opinion_notequal_mn, quality_rate, performance_rate_p, performance_rate_n, performance_file):
user_dict = read_user_data(user_file)
generate_ingroup(user_dict, ingroup_file)
quality_dict = generate_qualification(user_dict, quality_file, promotion_file, quality_rate)
manager_dict = read_manager_data(manager_file)
opinion_dict = generate_opinion(user_dict, quality_dict, manager_dict,opinion_equal_mp, opinion_equal_mn, opinion_notequal_mp, opinion_notequal_mn, opinion_equal_p, opinion_equal_n, opinion_notequal_p, opinion_notequal_n, opinion_file)
with open(submit_file, "w") as sf:
for key, opinion in opinion_dict.items():
user1 = key[0]
user2 = key[1]
if user_dict[user1] =='A':
if random.random()<submission_rate_A:
print('%s\t%s\t%d'%(user1,user2,opinion), file =sf)
else:
if random.random()<submission_rate_B:
print('%s\t%s\t%d'%(user1,user2,opinion), file =sf)
performance_dict = generate_performance(quality_dict, performance_rate_p, performance_rate_n, performance_file)
info_e, info_em = generate_discrimination_score(user_dict, manager_dict, opinion_dict, quality_dict, performance_dict)
return info_e, info_em
def generate_performance(quality_dict, performance_rate_p, performance_rate_n, performance_file):
performance_dict = {}
    # If the performance file already exists, read it; otherwise generate it.
if os.path.isfile(performance_file):
with open(performance_file, 'r') as pf:
lines = pf.readlines()
for line in lines:
info = line.split('\t')
score = info[1].strip()
performance_dict[info[0]] = float(score)
else:
with open(performance_file, 'w') as pf:
for user,quality in quality_dict.items():
if quality_dict[user]==1:
if random.random()<performance_rate_p:
print('%s\t%d'%(user,1), file =pf)
performance_dict[user] = 1
else:
print('%s\t%d'%(user,0), file =pf)
performance_dict[user] = 0
else:
if random.random()<performance_rate_n:
print('%s\t%d'%(user,1), file =pf)
performance_dict[user] = 1
else:
print('%s\t%d'%(user,0), file =pf)
performance_dict[user] = 0
return performance_dict
def generate_ingroup(user_dict, ingroup_file):
with open(ingroup_file, 'w') as igf:
for user1, label1 in user_dict.items():
for user2, label2 in user_dict.items():
if label1==label2:
print('%s\t%s\t%d'%(user1,user2,1), file =igf)
else:
print('%s\t%s\t%d'%(user1,user2,0), file =igf)
# In[2]:
def generate_discrimination_score(employees, manager_dict, opinion_dict, quality_dict, performance_dict):
info_e = []
opinion_e = {}
for e1 in employees.keys():
opinion_e[e1] = 0
for e2 in employees.keys():
if e1==e2:continue
opinion_e[e1]+=float(opinion_dict[(e2,e1)])
opinion_e[e1] = float(opinion_e[e1])/float(len(employees))
info_e.append((opinion_e[e1], performance_dict[e1], quality_dict[e1]))
info_em = []
opinion_em = {}
for e in employees.keys():
opinion_em[e] = 0
if e in manager_dict.keys():
for m in manager_dict[e]:
opinion_em[e]+=float(opinion_dict[(m,e)])
opinion_em[e] = float(opinion_em[e])/float(len(manager_dict[e]))
info_em.append((opinion_em[e], performance_dict[e], quality_dict[e]))
else:
info_em.append((None, performance_dict[e], quality_dict[e]))
return info_e, info_em
# In[3]:
def run(theta, folder_name):
i=1
while i<=len(theta):
user_file = '../data/parameters/'+folder_name+'/'+str(i)+'/label.txt'
quality_file = '../data/parameters/'+folder_name+'/'+str(i)+'/quality.txt'
opinion_file='../data/parameters/'+folder_name+'/'+str(i)+'/opinion.txt'
submit_file = '../data/parameters/'+folder_name+'/'+str(i)+'/submit.txt'
promotion_file = '../data/parameters/'+folder_name+'/'+str(i)+'/promotion.txt'
performance_file = '../data/parameters/'+folder_name+'/'+str(i)+'/performance.txt'
manager_file='../data/parameters/'+folder_name+'/'+str(i)+'/manager.txt'
ingroup_file = '../data/parameters/'+folder_name+'/'+str(i)+'/ingroup.txt'
opinion_equal_mp = theta[i-1][1]
opinion_equal_mn = theta[i-1][3]
opinion_notequal_mp =theta[i-1][0]
opinion_notequal_mn =theta[i-1][2]
opinion_equal_p = theta[i-1][1]
opinion_equal_n = theta[i-1][3]
opinion_notequal_p =theta[i-1][0]
opinion_notequal_n =theta[i-1][2]
submission_rate_A = 0.6
submission_rate_B = 0.6
quality_rate = 0.4
performance_rate_p = 0.6
performance_rate_n = 0.1
info_e, info_em = generate_submission(user_file, ingroup_file, manager_file, quality_file, opinion_file, submit_file, promotion_file, submission_rate_A, submission_rate_B, opinion_equal_p, opinion_equal_n, opinion_equal_mn, opinion_equal_mp, opinion_notequal_p, opinion_notequal_n, opinion_notequal_mp, opinion_notequal_mn, quality_rate, performance_rate_p, performance_rate_n, performance_file)
i+=1
#generate_performance(quality_dict, performance_rate_p, performance_rate_n, performance_file)
# In[4]:
def get_discrimination_score(info_e, info_em):
score = 0
for item in info_e:
if item[0]>=0.5 and item[2]==1:
score+=1
elif item[0]<0.5 and item[2]==0:
score+=1
disc_1 = float(score)/float(len(info_e))
print(disc_1)
score = 0
for item in info_em:
        if item[0] is None: continue
if item[0]>=0.5 and item[2]==1:
score+=1
elif item[0]<0.5 and item[2]==0:
score+=1
disc_2 = float(score)/float(len(info_e))
    print(disc_2)
    return disc_1, disc_2
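# Hedged worked example (not part of the original notebook): each info tuple is
# (aggregated opinion, performance, quality). An employee counts as consistent
# when opinion >= 0.5 matches quality == 1, or opinion < 0.5 matches quality == 0.
# For info_e = [(0.8, 1, 1), (0.2, 0, 0), (0.9, 1, 0)] two of the three tuples are
# consistent, so the printed score is 2/3 ~= 0.67. None entries in info_em are
# skipped, but the denominator stays len(info_e).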
# In[5]:
'''
theta = [[0.0,1.0,0.0,0.0],[0.33,1.0,0.0,0.0],[0.66,1.0,0.0,0.0], [1.0,1.0,0.0,0.0], [1.0,1.0,0.0,0.33], [1.0,1.0,0.0,0.66], [1.0,1.0,0.0,1.0]]
#folder_name = 'GC-parameters'
#run(theta, folder_name)
folder_name = 'Uni_param'
run(theta, folder_name)
'''
'''
theta = [[0.0,1.0,0.1,0.1],[0.2,1.0,0.1,0.1],[0.4,1.0,0.1,0.1], [0.6,1.0,0.1,0.1], [0.8,1.0,0.1,0.1], [1.0,1.0,0.1,0.1]]
folder_name = 'GC-parameters1'
run(theta, folder_name)
folder_name = 'Uni-parameters1'
run(theta, folder_name)
theta = [[1.0,0.0,0.1,0.1],[1.0,0.2, 0.1,0.1],[1.0,0.4,0.1,0.1], [1.0,0.6,0.1,0.1], [1.0,0.8,0.1,0.1], [1.0,1.0,0.1,0.1]]
folder_name = 'GC-parameters2'
run(theta, folder_name)
folder_name = 'Uni-parameters2'
run(theta, folder_name)
'''
# In[ ]:
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import mock
from oslo_serialization import jsonutils
import testtools
from nova.api.openstack.compute import services
from nova.cells import utils as cells_utils
from nova import compute
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_objects
from nova.tests.unit.objects import test_service
from nova.tests import uuidsentinel as uuids
class ComputeHostAPITestCase(test.TestCase):
def setUp(self):
super(ComputeHostAPITestCase, self).setUp()
self.host_api = compute.HostAPI()
self.aggregate_api = compute_api.AggregateAPI()
self.ctxt = context.get_admin_context()
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
self.req = fakes.HTTPRequest.blank('')
self.controller = services.ServiceController()
self.useFixture(nova_fixtures.SingleCellSimple())
def _compare_obj(self, obj, db_obj):
test_objects.compare_obj(self, obj, db_obj,
allow_missing=test_service.OPTIONAL)
def _compare_objs(self, obj_list, db_obj_list):
self.assertEqual(len(obj_list), len(db_obj_list),
"The length of two object lists are different.")
for index, obj in enumerate(obj_list):
self._compare_obj(obj, db_obj_list[index])
def test_set_host_enabled(self):
fake_notifier.NOTIFICATIONS = []
@mock.patch.object(self.host_api.rpcapi, 'set_host_enabled',
return_value='fake-result')
@mock.patch.object(self.host_api, '_assert_host_exists',
return_value='fake_host')
def _do_test(mock_assert_host_exists, mock_set_host_enabled):
result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
'fake_enabled')
self.assertEqual('fake-result', result)
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('HostAPI.set_enabled.start', msg.event_type)
self.assertEqual('api.fake_host', msg.publisher_id)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake_enabled', msg.payload['enabled'])
self.assertEqual('fake_host', msg.payload['host_name'])
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual('HostAPI.set_enabled.end', msg.event_type)
self.assertEqual('api.fake_host', msg.publisher_id)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake_enabled', msg.payload['enabled'])
self.assertEqual('fake_host', msg.payload['host_name'])
_do_test()
def test_host_name_from_assert_hosts_exists(self):
@mock.patch.object(self.host_api.rpcapi, 'set_host_enabled',
return_value='fake-result')
@mock.patch.object(self.host_api, '_assert_host_exists',
return_value='fake_host')
def _do_test(mock_assert_host_exists, mock_set_host_enabled):
result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
'fake_enabled')
self.assertEqual('fake-result', result)
_do_test()
def test_get_host_uptime(self):
@mock.patch.object(self.host_api.rpcapi, 'get_host_uptime',
return_value='fake-result')
@mock.patch.object(self.host_api, '_assert_host_exists',
return_value='fake_host')
def _do_test(mock_assert_host_exists, mock_get_host_uptime):
result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
self.assertEqual('fake-result', result)
_do_test()
def test_get_host_uptime_service_down(self):
@mock.patch.object(self.host_api.db, 'service_get_by_compute_host',
return_value=dict(test_service.fake_service, id=1))
@mock.patch.object(self.host_api.servicegroup_api, 'service_is_up',
return_value=False)
def _do_test(mock_service_is_up, mock_service_get_by_compute_host):
self.assertRaises(exception.ComputeServiceUnavailable,
self.host_api.get_host_uptime, self.ctxt,
'fake_host')
_do_test()
def test_host_power_action(self):
fake_notifier.NOTIFICATIONS = []
@mock.patch.object(self.host_api.rpcapi, 'host_power_action',
return_value='fake-result')
@mock.patch.object(self.host_api, '_assert_host_exists',
return_value='fake_host')
def _do_test(mock_assert_host_exists, mock_host_power_action):
result = self.host_api.host_power_action(self.ctxt, 'fake_host',
'fake_action')
self.assertEqual('fake-result', result)
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('HostAPI.power_action.start', msg.event_type)
self.assertEqual('api.fake_host', msg.publisher_id)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake_action', msg.payload['action'])
self.assertEqual('fake_host', msg.payload['host_name'])
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual('HostAPI.power_action.end', msg.event_type)
self.assertEqual('api.fake_host', msg.publisher_id)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake_action', msg.payload['action'])
self.assertEqual('fake_host', msg.payload['host_name'])
_do_test()
def test_set_host_maintenance(self):
fake_notifier.NOTIFICATIONS = []
@mock.patch.object(self.host_api.rpcapi, 'host_maintenance_mode',
return_value='fake-result')
@mock.patch.object(self.host_api, '_assert_host_exists',
return_value='fake_host')
def _do_test(mock_assert_host_exists, mock_host_maintenance_mode):
result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host',
'fake_mode')
self.assertEqual('fake-result', result)
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('HostAPI.set_maintenance.start', msg.event_type)
self.assertEqual('api.fake_host', msg.publisher_id)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake_host', msg.payload['host_name'])
self.assertEqual('fake_mode', msg.payload['mode'])
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual('HostAPI.set_maintenance.end', msg.event_type)
self.assertEqual('api.fake_host', msg.publisher_id)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake_host', msg.payload['host_name'])
self.assertEqual('fake_mode', msg.payload['mode'])
_do_test()
def test_service_get_all_cells(self):
cells = objects.CellMappingList.get_all(self.ctxt)
for cell in cells:
with context.target_cell(self.ctxt, cell) as cctxt:
objects.Service(context=cctxt,
binary='nova-compute',
host='host-%s' % cell.uuid).create()
services = self.host_api.service_get_all(self.ctxt, all_cells=True)
self.assertEqual(sorted(['host-%s' % cell.uuid for cell in cells]),
sorted([svc.host for svc in services]))
def test_service_get_all_no_zones(self):
services = [dict(test_service.fake_service,
id=1, topic='compute', host='host1'),
dict(test_service.fake_service,
topic='compute', host='host2')]
@mock.patch.object(self.host_api.db, 'service_get_all')
def _do_test(mock_service_get_all):
mock_service_get_all.return_value = services
# Test no filters
result = self.host_api.service_get_all(self.ctxt)
mock_service_get_all.assert_called_once_with(self.ctxt,
disabled=None)
self._compare_objs(result, services)
# Test no filters #2
mock_service_get_all.reset_mock()
result = self.host_api.service_get_all(self.ctxt, filters={})
mock_service_get_all.assert_called_once_with(self.ctxt,
disabled=None)
self._compare_objs(result, services)
# Test w/ filter
mock_service_get_all.reset_mock()
result = self.host_api.service_get_all(self.ctxt,
filters=dict(host='host2'))
mock_service_get_all.assert_called_once_with(self.ctxt,
disabled=None)
self._compare_objs(result, [services[1]])
_do_test()
def test_service_get_all(self):
services = [dict(test_service.fake_service,
topic='compute', host='host1'),
dict(test_service.fake_service,
topic='compute', host='host2')]
exp_services = []
for service in services:
exp_service = {}
exp_service.update(availability_zone='nova', **service)
exp_services.append(exp_service)
@mock.patch.object(self.host_api.db, 'service_get_all')
def _do_test(mock_service_get_all):
mock_service_get_all.return_value = services
# Test no filters
result = self.host_api.service_get_all(self.ctxt, set_zones=True)
mock_service_get_all.assert_called_once_with(self.ctxt,
disabled=None)
self._compare_objs(result, exp_services)
# Test no filters #2
mock_service_get_all.reset_mock()
result = self.host_api.service_get_all(self.ctxt, filters={},
set_zones=True)
mock_service_get_all.assert_called_once_with(self.ctxt,
disabled=None)
self._compare_objs(result, exp_services)
# Test w/ filter
mock_service_get_all.reset_mock()
result = self.host_api.service_get_all(self.ctxt,
filters=dict(host='host2'),
set_zones=True)
mock_service_get_all.assert_called_once_with(self.ctxt,
disabled=None)
self._compare_objs(result, [exp_services[1]])
# Test w/ zone filter but no set_zones arg.
mock_service_get_all.reset_mock()
filters = {'availability_zone': 'nova'}
result = self.host_api.service_get_all(self.ctxt,
filters=filters)
mock_service_get_all.assert_called_once_with(self.ctxt,
disabled=None)
self._compare_objs(result, exp_services)
_do_test()
def test_service_get_by_compute_host(self):
@mock.patch.object(self.host_api.db, 'service_get_by_compute_host',
return_value=test_service.fake_service)
def _do_test(mock_service_get_by_compute_host):
result = self.host_api.service_get_by_compute_host(self.ctxt,
'fake-host')
self.assertEqual(test_service.fake_service['id'], result.id)
_do_test()
def test_service_update(self):
host_name = 'fake-host'
binary = 'nova-compute'
params_to_update = dict(disabled=True)
service_id = 42
expected_result = dict(test_service.fake_service, id=service_id)
@mock.patch.object(self.host_api.db, 'service_get_by_host_and_binary')
@mock.patch.object(self.host_api.db, 'service_update')
def _do_test(mock_service_update, mock_service_get_by_host_and_binary):
mock_service_get_by_host_and_binary.return_value = expected_result
mock_service_update.return_value = expected_result
result = self.host_api.service_update(
self.ctxt, host_name, binary, params_to_update)
self._compare_obj(result, expected_result)
_do_test()
@mock.patch.object(objects.InstanceList, 'get_by_host',
return_value = ['fake-responses'])
def test_instance_get_all_by_host(self, mock_get):
result = self.host_api.instance_get_all_by_host(self.ctxt,
'fake-host')
self.assertEqual(['fake-responses'], result)
def test_task_log_get_all(self):
@mock.patch.object(self.host_api.db, 'task_log_get_all',
return_value='fake-response')
def _do_test(mock_task_log_get_all):
result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
'fake-begin', 'fake-end',
host='fake-host',
state='fake-state')
self.assertEqual('fake-response', result)
_do_test()
@mock.patch.object(objects.CellMappingList, 'get_all',
return_value=objects.CellMappingList(objects=[
objects.CellMapping(
uuid=uuids.cell1_uuid,
transport_url='mq://fake1',
database_connection='db://fake1'),
objects.CellMapping(
uuid=uuids.cell2_uuid,
transport_url='mq://fake2',
database_connection='db://fake2'),
objects.CellMapping(
uuid=uuids.cell3_uuid,
transport_url='mq://fake3',
database_connection='db://fake3')]))
@mock.patch.object(objects.Service, 'get_by_uuid',
side_effect=[
exception.ServiceNotFound(
service_id=uuids.service_uuid),
objects.Service(uuid=uuids.service_uuid)])
def test_service_get_by_id_using_uuid(self, service_get_by_uuid,
cell_mappings_get_all):
"""Tests that we can lookup a service in the HostAPI using a uuid.
There are two calls to objects.Service.get_by_uuid and the first
raises ServiceNotFound so that we ensure we keep looping over the
cells. We'll find the service in the second cell and break the loop
so that we don't needlessly check in the third cell.
"""
def _fake_set_target_cell(ctxt, cell_mapping):
if cell_mapping:
# These aren't really what would be set for values but let's
# keep this simple so we can assert something is set when a
# mapping is provided.
ctxt.db_connection = cell_mapping.database_connection
ctxt.mq_connection = cell_mapping.transport_url
# We have to override the SingleCellSimple fixture.
self.useFixture(fixtures.MonkeyPatch(
'nova.context.set_target_cell', _fake_set_target_cell))
ctxt = context.get_admin_context()
self.assertIsNone(ctxt.db_connection)
self.host_api.service_get_by_id(ctxt, uuids.service_uuid)
# We should have broken the loop over the cells and set the target cell
# on the context.
service_get_by_uuid.assert_has_calls(
[mock.call(ctxt, uuids.service_uuid)] * 2)
self.assertEqual('db://fake2', ctxt.db_connection)
@mock.patch('nova.context.set_target_cell')
@mock.patch('nova.compute.api.load_cells')
@mock.patch('nova.objects.Service.get_by_id')
def test_service_delete(self, get_by_id, load_cells, set_target):
compute_api.CELLS = [
objects.CellMapping(),
objects.CellMapping(),
objects.CellMapping(),
]
service = mock.MagicMock()
get_by_id.side_effect = [exception.ServiceNotFound(service_id=1),
service,
exception.ServiceNotFound(service_id=1)]
self.host_api.service_delete(self.ctxt, 1)
get_by_id.assert_has_calls([mock.call(self.ctxt, 1),
mock.call(self.ctxt, 1),
mock.call(self.ctxt, 1)])
service.destroy.assert_called_once_with()
set_target.assert_called_once_with(self.ctxt, compute_api.CELLS[1])
@mock.patch('nova.context.set_target_cell')
@mock.patch('nova.compute.api.load_cells')
@mock.patch('nova.objects.Service.get_by_id')
def test_service_delete_ambiguous(self, get_by_id, load_cells, set_target):
compute_api.CELLS = [
objects.CellMapping(),
objects.CellMapping(),
objects.CellMapping(),
]
service1 = mock.MagicMock()
service2 = mock.MagicMock()
get_by_id.side_effect = [exception.ServiceNotFound(service_id=1),
service1,
service2]
self.assertRaises(exception.ServiceNotUnique,
self.host_api.service_delete, self.ctxt, 1)
self.assertFalse(service1.destroy.called)
self.assertFalse(service2.destroy.called)
self.assertFalse(set_target.called)
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.HostMapping, 'get_by_host')
def test_service_delete_compute_in_aggregate(self, mock_hm, mock_get_cn):
compute = self.host_api.db.service_create(self.ctxt,
{'host': 'fake-compute-host',
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
# This is needed because of lazy-loading service.compute_node
cn = objects.ComputeNode(uuid=uuids.cn, host="fake-compute-host",
hypervisor_hostname="fake-compute-host")
mock_get_cn.return_value = [cn]
aggregate = self.aggregate_api.create_aggregate(self.ctxt,
'aggregate',
None)
self.aggregate_api.add_host_to_aggregate(self.ctxt,
aggregate.id,
'fake-compute-host')
self.controller.delete(self.req, compute.id)
result = self.aggregate_api.get_aggregate(self.ctxt,
aggregate.id).hosts
self.assertEqual([], result)
mock_hm.return_value.destroy.assert_called_once_with()
@mock.patch('nova.db.compute_node_statistics')
def test_compute_node_statistics(self, mock_cns):
        # Note: this should only be called twice because cell0 is skipped.
mock_cns.side_effect = [
{'stat1': 1, 'stat2': 4.0},
{'stat1': 5, 'stat2': 1.2},
]
compute_api.CELLS = [objects.CellMapping(uuid=uuids.cell1),
objects.CellMapping(
uuid=objects.CellMapping.CELL0_UUID),
objects.CellMapping(uuid=uuids.cell2)]
stats = self.host_api.compute_node_statistics(self.ctxt)
self.assertEqual({'stat1': 6, 'stat2': 5.2}, stats)
@mock.patch.object(objects.CellMappingList, 'get_all',
return_value=objects.CellMappingList(objects=[
objects.CellMapping(
uuid=objects.CellMapping.CELL0_UUID,
transport_url='mq://cell0',
database_connection='db://cell0'),
objects.CellMapping(
uuid=uuids.cell1_uuid,
transport_url='mq://fake1',
database_connection='db://fake1'),
objects.CellMapping(
uuid=uuids.cell2_uuid,
transport_url='mq://fake2',
database_connection='db://fake2')]))
@mock.patch.object(objects.ComputeNode, 'get_by_uuid',
side_effect=[exception.ComputeHostNotFound(
host=uuids.cn_uuid),
objects.ComputeNode(uuid=uuids.cn_uuid)])
def test_compute_node_get_using_uuid(self, compute_get_by_uuid,
cell_mappings_get_all):
"""Tests that we can lookup a compute node in the HostAPI using a uuid.
"""
self.host_api.compute_node_get(self.ctxt, uuids.cn_uuid)
# cell0 should have been skipped, and the compute node wasn't found
# in cell1 so we checked cell2 and found it
self.assertEqual(2, compute_get_by_uuid.call_count)
compute_get_by_uuid.assert_has_calls(
[mock.call(self.ctxt, uuids.cn_uuid)] * 2)
@mock.patch.object(objects.CellMappingList, 'get_all',
return_value=objects.CellMappingList(objects=[
objects.CellMapping(
uuid=objects.CellMapping.CELL0_UUID,
transport_url='mq://cell0',
database_connection='db://cell0'),
objects.CellMapping(
uuid=uuids.cell1_uuid,
transport_url='mq://fake1',
database_connection='db://fake1'),
objects.CellMapping(
uuid=uuids.cell2_uuid,
transport_url='mq://fake2',
database_connection='db://fake2')]))
@mock.patch.object(objects.ComputeNode, 'get_by_uuid',
side_effect=exception.ComputeHostNotFound(
host=uuids.cn_uuid))
def test_compute_node_get_not_found(self, compute_get_by_uuid,
cell_mappings_get_all):
"""Tests that we can lookup a compute node in the HostAPI using a uuid
and will fail with ComputeHostNotFound if we didn't find it in any
cell.
"""
self.assertRaises(exception.ComputeHostNotFound,
self.host_api.compute_node_get,
self.ctxt, uuids.cn_uuid)
# cell0 should have been skipped, and the compute node wasn't found
# in cell1 or cell2.
self.assertEqual(2, compute_get_by_uuid.call_count)
compute_get_by_uuid.assert_has_calls(
[mock.call(self.ctxt, uuids.cn_uuid)] * 2)
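# Hedged sketch (not Nova's actual implementation): the cross-cell aggregation
# asserted in test_compute_node_statistics above amounts to a key-wise sum of
# the per-cell statistics dicts, with cell0 skipped, roughly:
def _sum_cell_statistics(per_cell_stats):
    """Sums a list of per-cell {stat_name: value} dicts key-wise."""
    totals = {}
    for stats in per_cell_stats:
        for key, value in stats.items():
            totals[key] = totals.get(key, 0) + value
    return totals
# e.g. _sum_cell_statistics([{'stat1': 1, 'stat2': 4.0}, {'stat1': 5, 'stat2': 1.2}])
# == {'stat1': 6, 'stat2': 5.2}, matching the expected result in the test.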
class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
def setUp(self):
self.flags(enable=True, group='cells')
self.flags(cell_type='api', group='cells')
super(ComputeHostAPICellsTestCase, self).setUp()
@testtools.skip('cellsv1 does not use this')
def test_service_get_all_cells(self):
pass
@testtools.skip('cellsv1 does not use this')
def test_service_delete_ambiguous(self):
pass
def test_service_get_all_no_zones(self):
services = [
cells_utils.ServiceProxy(
objects.Service(id=1, topic='compute', host='host1'),
'cell1'),
cells_utils.ServiceProxy(
objects.Service(id=2, topic='compute', host='host2'),
'cell1')]
fake_filters = {'host': 'host1'}
@mock.patch.object(self.host_api.cells_rpcapi, 'service_get_all')
def _do_test(mock_service_get_all):
mock_service_get_all.return_value = services
result = self.host_api.service_get_all(self.ctxt,
filters=fake_filters)
self.assertEqual(services, result)
_do_test()
def _test_service_get_all(self, fake_filters, **kwargs):
service_attrs = dict(test_service.fake_service)
del service_attrs['version']
services = [
cells_utils.ServiceProxy(
objects.Service(**dict(service_attrs, id=1,
topic='compute', host='host1')),
'cell1'),
cells_utils.ServiceProxy(
objects.Service(**dict(service_attrs, id=2,
topic='compute', host='host2')),
'cell1')]
exp_services = []
for service in services:
exp_service = copy.copy(service)
exp_service.update({'availability_zone': 'nova'})
exp_services.append(exp_service)
@mock.patch.object(self.host_api.cells_rpcapi, 'service_get_all')
def _do_test(mock_service_get_all):
mock_service_get_all.return_value = services
result = self.host_api.service_get_all(self.ctxt,
filters=fake_filters,
**kwargs)
mock_service_get_all.assert_called_once_with(self.ctxt,
filters=fake_filters)
self.assertEqual(jsonutils.to_primitive(exp_services),
jsonutils.to_primitive(result))
_do_test()
def test_service_get_all(self):
fake_filters = {'availability_zone': 'nova'}
self._test_service_get_all(fake_filters)
def test_service_get_all_set_zones(self):
fake_filters = {'key1': 'val1'}
self._test_service_get_all(fake_filters, set_zones=True)
def test_service_get_by_compute_host(self):
obj = objects.Service(id=1, host='fake')
fake_service = cells_utils.ServiceProxy(obj, 'cell1')
@mock.patch.object(self.host_api.cells_rpcapi,
'service_get_by_compute_host')
def _do_test(mock_service_get_by_compute_host):
mock_service_get_by_compute_host.return_value = fake_service
result = self.host_api.service_get_by_compute_host(self.ctxt,
'fake-host')
self.assertEqual(fake_service, result)
_do_test()
def test_service_update(self):
host_name = 'fake-host'
binary = 'nova-compute'
params_to_update = dict(disabled=True)
obj = objects.Service(id=42, host='fake')
fake_service = cells_utils.ServiceProxy(obj, 'cell1')
@mock.patch.object(self.host_api.cells_rpcapi, 'service_update')
def _do_test(mock_service_update):
mock_service_update.return_value = fake_service
result = self.host_api.service_update(
self.ctxt, host_name, binary, params_to_update)
self.assertEqual(fake_service, result)
_do_test()
def test_service_delete(self):
cell_service_id = cells_utils.cell_with_item('cell1', 1)
with mock.patch.object(self.host_api.cells_rpcapi,
'service_delete') as service_delete:
self.host_api.service_delete(self.ctxt, cell_service_id)
service_delete.assert_called_once_with(
self.ctxt, cell_service_id)
@testtools.skip('cells do not support host aggregates')
def test_service_delete_compute_in_aggregate(self):
        # this test is not valid for cells
pass
@mock.patch.object(objects.InstanceList, 'get_by_host')
def test_instance_get_all_by_host(self, mock_get):
instances = [dict(id=1, cell_name='cell1', host='host1'),
dict(id=2, cell_name='cell2', host='host1'),
dict(id=3, cell_name='cell1', host='host2')]
mock_get.return_value = instances
expected_result = [instances[0], instances[2]]
cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host')
result = self.host_api.instance_get_all_by_host(self.ctxt,
cell_and_host)
self.assertEqual(expected_result, result)
def test_task_log_get_all(self):
@mock.patch.object(self.host_api.cells_rpcapi, 'task_log_get_all',
return_value='fake-response')
def _do_test(mock_task_log_get_all):
result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
'fake-begin', 'fake-end',
host='fake-host',
state='fake-state')
self.assertEqual('fake-response', result)
_do_test()
def test_get_host_uptime_service_down(self):
        # The corresponding Compute test case depends on _assert_host_exists,
        # which is a no-op in the cells API.
pass
def test_get_host_uptime(self):
@mock.patch.object(self.host_api.cells_rpcapi, 'get_host_uptime',
return_value='fake-response')
def _do_test(mock_get_host_uptime):
result = self.host_api.get_host_uptime(self.ctxt, 'fake-host')
self.assertEqual('fake-response', result)
_do_test()
def test_compute_node_statistics(self):
# Not implementing cross-cellsv2 for cellsv1
pass
def test_compute_node_get_using_uuid(self):
cell_compute_uuid = cells_utils.cell_with_item('cell1', uuids.cn_uuid)
with mock.patch.object(self.host_api.cells_rpcapi,
'compute_node_get') as compute_node_get:
self.host_api.compute_node_get(self.ctxt, cell_compute_uuid)
compute_node_get.assert_called_once_with(self.ctxt, cell_compute_uuid)
def test_compute_node_get_not_found(self):
cell_compute_uuid = cells_utils.cell_with_item('cell1', uuids.cn_uuid)
with mock.patch.object(self.host_api.cells_rpcapi, 'compute_node_get',
side_effect=exception.CellRoutingInconsistency(
reason='because_cells_v1')):
self.assertRaises(exception.ComputeHostNotFound,
self.host_api.compute_node_get,
self.ctxt, cell_compute_uuid)
|
|
# -*- coding: utf-8 -*-
"""
Tests that don't hit the Google Music servers.
"""
from collections import namedtuple
import logging
import os
import time
from mock import MagicMock, patch
from proboscis.asserts import (
assert_raises, assert_true, assert_false, assert_equal,
assert_is_not, Check
)
from proboscis import test
import gmusicapi.session
from gmusicapi.clients import Webclient, Musicmanager
from gmusicapi.exceptions import AlreadyLoggedIn, CallFailure
from gmusicapi.protocol.shared import authtypes, ClientLogin
from gmusicapi.protocol import mobileclient
from gmusicapi.test.utils import NoticeLogging
from gmusicapi.utils import utils, jsarray
jsarray_samples = []
jsarray_filenames = [base + '.jsarray' for base in ('searchresult', 'fetchartist')]
test_file_dir = os.path.dirname(os.path.abspath(__file__))
for filepath in [os.path.join(test_file_dir, p) for p in jsarray_filenames]:
with open(filepath, 'r') as f:
jsarray_samples.append(f.read().decode('utf-8'))
# TODO test gather_local, transcoding
# All tests end up in the local group.
test = test(groups=['local'])
@test
def longest_increasing_sub():
lisi = utils.longest_increasing_subseq
assert_equal(lisi([]), [])
assert_equal(lisi(range(10, 0, -1)), [1])
assert_equal(lisi(range(10, 20)), range(10, 20))
assert_equal(lisi([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9]),
[1, 2, 3, 5, 8, 9])
#
# clients
#
# this feels like a dumb pattern, but I can't think of a better way
names = ('Webclient', 'Musicmanager')
Clients = namedtuple('Clients', [n.lower() for n in names])
def create_clients():
clients = []
for name in names:
cls = getattr(gmusicapi.clients, name)
c = cls()
# mock out the underlying session
c.session = MagicMock()
clients.append(c)
return Clients(*clients)
@test
def no_client_auth_initially():
wc = Webclient()
assert_false(wc.is_authenticated())
mm = Musicmanager()
assert_false(mm.is_authenticated())
@test
def mm_prevents_bad_mac_format():
mm = create_clients().musicmanager
with Check() as check:
for bad_mac in ['bogus',
'11:22:33:44:55:66:',
'11:22:33:44:55:ab',
'11:22:33:44:55']:
check.raises(
ValueError,
mm._perform_upauth,
uploader_id=bad_mac,
uploader_name='valid')
# @test
# def auto_playlists_are_empty():
# # this doesn't actually hit the server at the moment.
# # see issue 102
# api = Api()
# assert_equal(api.get_all_playlist_ids(auto=True, user=False),
# {'auto': {}})
#
# sessions
#
Sessions = namedtuple('Sessions', [n.lower() for n in names])
def create_sessions():
sessions = []
for name in names:
cls = getattr(gmusicapi.session, name)
s = cls()
# mock out the underlying requests.session
s._rsession = MagicMock()
sessions.append(s)
return Sessions(*sessions)
@test
def no_session_auth_initially():
for s in create_sessions():
assert_false(s.is_authenticated)
@test
def session_raises_alreadyloggedin():
for s in create_sessions():
s.is_authenticated = True
def login():
            # hackish: login ignores args so we can test them all here;
            # this just ensures we pass an acceptable number of args
s.login(*([None] * 3))
assert_raises(AlreadyLoggedIn, login)
@test
def session_logout():
for s in create_sessions():
s.is_authenticated = True
old_session = s._rsession
s.logout()
assert_false(s.is_authenticated)
old_session.close.assert_called_once_with()
assert_is_not(s._rsession, old_session)
@test
def send_without_auth():
for s in create_sessions():
s.is_authenticated = True
mock_session = MagicMock()
mock_req_kwargs = {'fake': 'kwargs'}
s.send(mock_req_kwargs, authtypes(), mock_session)
# sending without auth should not use the normal session,
# since that might have auth cookies automatically attached
assert_false(s._rsession.called)
mock_session.request.called_once_with(**mock_req_kwargs)
mock_session.closed.called_once_with()
#
# protocol
#
@test
def clientlogin_raises_on_strange_response():
mock_session = MagicMock()
mock_res = MagicMock()
mock_res.status_code = 403
mock_res.text = (
'Error=BadAuthentication'
'\nUrl=https://www.google.com/accounts/...'
'\nInfo=WebLoginRequired')
mock_session.send = MagicMock(return_value=mock_res)
root_logger = logging.getLogger('gmusicapi')
noticer = [h for h in root_logger.handlers
if isinstance(h, NoticeLogging)][0]
with patch.object(noticer, 'emit', return_value=None) as mock_emit:
# This call should generate a warning.
# We don't want it to fail the build, though.
assert_raises(CallFailure,
ClientLogin.perform,
mock_session, False, 'email', 'pass')
assert_true(mock_emit.called)
@test
def authtypes_factory_defaults():
auth = authtypes()
assert_false(auth.oauth)
assert_false(auth.sso)
assert_false(auth.xt)
@test
def authtypes_factory_args():
auth = authtypes(oauth=True)
assert_true(auth.oauth)
assert_false(auth.sso)
assert_false(auth.xt)
@test
def mc_url_signing():
sig, _ = mobileclient.GetStreamUrl.get_signature("Tdr6kq3xznv5kdsphyojox6dtoq",
"1373247112519")
assert_equal(sig, "gua1gInBdaVo7_dSwF9y0kodua0")
#
# utils
#
@test
def retry_failure_propagation():
@utils.retry(tries=1)
def raise_exception():
raise AssertionError
assert_raises(AssertionError, raise_exception)
@test
def retry_sleep_timing():
@utils.retry(tries=3, delay=.05, backoff=2)
def raise_exception():
raise AssertionError
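    # With tries=3, delay=.05 and backoff=2, the decorator should sleep roughly
    # .05 s and then .10 s between the three attempts, so the total elapsed time
    # lands just above .15 s (hence the bounds asserted below).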
pre = time.time()
assert_raises(AssertionError, raise_exception)
post = time.time()
delta = post - pre
assert_true(.15 < delta < .2, "delta: %s" % delta)
@test
def retry_is_dual_decorator():
@utils.retry
def return_arg(arg=None):
return arg
assert_equal(return_arg(1), 1)
@test
def jsarray_parsing():
for raw in jsarray_samples:
# should not raise an exception
jsarray.loads(raw)
@test
def locate_transcoder():
utils.locate_mp3_transcoder() # should not raise
|
|
# Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from infoblox_client import exceptions as ib_ex
from infoblox_client import objects as obj
from infoblox_client import utils as ib_utils
LOG = logging.getLogger(__name__)
class InfobloxObjectManager(object):
def __init__(self, connector):
self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network."""
        # NIOS does not allow setting DHCP options for IPv6 over WAPI,
        # so limit DHCP option usage to IPv4 only.
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if ipv4 and nameservers:
options.append(obj.DhcpOption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.DhcpOption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.DhcpOption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False)
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False)
def delete_ip_range(self, network_view, start_ip, end_ip):
        ip_range = obj.IPRange.search(self.connector,
                                      network_view=network_view,
                                      start_addr=start_ip,
                                      end_addr=end_ip)
        if ip_range:
            ip_range.delete()
def has_networks(self, network_view_name):
try:
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
except ib_ex.InfobloxSearchError:
return False
def network_exists(self, network_view, cidr):
"""Deprecated, use get_network() instead."""
LOG.warning(
"DEPRECATION WARNING! Using network_exists() is deprecated "
"and to be removed in next releases. "
"Use get_network() or objects.Network.search instead")
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
return network is not None
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
def find_hostname(self, dns_view, hostname, ip):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip)
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
try:
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
except ib_ex.InfobloxSearchError:
return False
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
try:
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
except ib_ex.InfobloxCannotCreateObject:
LOG.warning('Unable to create DNS zone %(dns_zone_fqdn)s '
'for %(dns_view)s',
{'dns_zone_fqdn': dns_zone, 'dns_view': dns_view})
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
def get_member(self, member):
member.fetch()
return member
def get_all_ea_definitions(self):
try:
ea_defs = obj.EADefinition.search_all(self.connector)
return ea_defs
except ib_ex.InfobloxSearchError:
return None
def create_ea_definition(self, ea_def):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
            LOG.error('Unable to create Extensible Attribute Definition '
                      '%s', ea_def)
def create_required_ea_definitions(self, required_ea_defs):
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = filter(lambda x: not next(
(y for y in existing_ea_defs if x['name'] == y.name), None),
required_ea_defs)
for ea_def in missing_ea_defs:
self.create_ea_definition(ea_def)
def restart_all_services(self, member):
if not member._ref:
member.fetch()
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def get_object_refs_associated_with_a_record(self, a_record_ref):
        # each entry pairs an associated record type with the field used to search for it
associated_with_a_record = [
{'type': 'record:cname', 'search': 'canonical'},
{'type': 'record:txt', 'search': 'name'}
]
ib_obj_refs = []
a_record = self.connector.get_object(a_record_ref)
for rec_inf in associated_with_a_record:
obj_type = rec_inf['type']
payload = {'view': a_record['view'],
rec_inf['search']: a_record['name']}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
ib_obj_refs.append(ib_obj['_ref'])
return ib_obj_refs
def get_all_associated_objects(self, network_view, ip):
ip_objects = obj.IPAddress.search(self.connector,
network_view=network_view,
ip_address=ip)
if ip_objects:
return ip_objects.objects
return []
@staticmethod
def _get_object_type_from_ref(ref):
return ref.split('/', 1)[0]
def delete_all_associated_objects(self, network_view, ip, delete_list):
del_ib_objs = []
ib_obj_refs = self.get_all_associated_objects(network_view, ip)
for ib_obj_ref in ib_obj_refs:
del_ib_objs.append(ib_obj_ref)
obj_type = self._get_object_type_from_ref(ib_obj_ref)
if obj_type in ['record:a', 'record:aaaa']:
del_ib_objs.extend(
self.get_object_refs_associated_with_a_record(ib_obj_ref))
for ib_obj_ref in del_ib_objs:
obj_type = self._get_object_type_from_ref(ib_obj_ref)
if obj_type in delete_list:
self.connector.delete_object(ib_obj_ref)
def delete_object_by_ref(self, ref):
try:
self.connector.delete_object(ref)
except ib_ex.InfobloxCannotDeleteObject:
pass
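# Hedged usage sketch (not part of the original module): wiring the manager to a
# connector and creating an IPv4 network. The host, credentials, and addresses
# below are placeholders, and the Connector options dict follows the layout the
# infoblox-client documentation suggests.
def _example_create_network():
    from infoblox_client import connector as ib_connector
    conn = ib_connector.Connector({'host': 'nios.example.com',
                                   'username': 'admin',
                                   'password': 'secret'})
    manager = InfobloxObjectManager(conn)
    return manager.create_network('default', '10.0.0.0/24',
                                  nameservers=['10.0.0.2'],
                                  gateway_ip='10.0.0.1')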
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# type: ignore
'''Fixtures for Selenium end-to-end tests.'''
import contextlib
import json
import logging
import os.path
import time
import urllib
from typing import Optional
import pytest
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from ui import util # pylint: disable=no-name-in-module
_DEFAULT_TIMEOUT = 10 # seconds
_DIRNAME = os.path.dirname(__file__)
_SUCCESS = True
_WINDOW_SIZE = (1920, 1080)
_BLANK = '/404.html'  # A path that returns 200 in both Firefox and Chrome.
class JavaScriptLogCollector:
'''Collects JavaScript errors from the log.'''
def __init__(self, dr):
self.driver = dr
self._log_index = 0
self._log_stack = [[]]
def empty(self) -> bool:
'''Returns whether the stack is empty.'''
# There is one catch-all frame at the bottom of the stack when nobody
# has called push().
return len(self._log_stack) <= 1
def extend(self, errors):
'''Injects errors into the current log frame.'''
self._log_stack[-1].extend(errors)
def push(self):
'''Pushes a new error list.'''
self._log_stack[-1].extend(self._get_last_console_logs())
self._log_stack.append([])
def pop(self):
'''Grabs the last error list.'''
self._log_stack[-1].extend(self._get_last_console_logs())
return self._log_stack.pop()
def _get_last_console_logs(self):
'''Grabs the latest set of JavaScript logs and clears them.'''
try:
browser_logs = self.driver.browser.get_log('browser')
except WebDriverException:
# Firefox does not support getting console logs.
browser_logs = []
current_index, self._log_index = self._log_index, len(browser_logs)
for entry in browser_logs[current_index:]:
if entry['level'] != 'SEVERE':
continue
logging.info(entry)
if 'WebSocket' in entry['message']:
# Travis does not have broadcaster yet.
continue
if 'https://www.facebook.com/' in entry['message']:
# Let's not block submissions when Facebook is
# having a bad day.
continue
yield entry
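# Hedged usage sketch (not part of the original fixtures): how a test might
# bracket a block of interactions with the collector to assert that no severe
# JavaScript errors were logged. `driver` is assumed to be a Driver instance
# (defined below), whose log_collector attribute is a JavaScriptLogCollector.
def _example_assert_no_js_errors(driver):
    driver.log_collector.push()
    # ... interact with the page under test ...
    errors = list(driver.log_collector.pop())
    assert not errors, 'Unexpected JavaScript errors: %s' % errors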
class Driver: # pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
'''Wraps the state needed to run a test.'''
# pylint: disable=too-many-arguments
def __init__(self, browser, browser_name, wait, url, worker_id, options):
self.browser = browser
self.browser_name = browser_name
self.wait = wait
self._worker_id = worker_id
self._next_id = 0
self._screenshot_index = 0
self._url = url
self.options = options
self.user_username = self.create_user()
self.admin_username = self.create_user(admin=True)
self.log_collector = JavaScriptLogCollector(self)
self.test_name = ''
def generate_id(self):
'''Generates a relatively unique id.'''
self._next_id += 1
return '%s_%d_%d' % (self._worker_id, int(time.time()), self._next_id)
def url(self, path):
'''Gets the full url for :path.'''
return urllib.parse.urljoin(self._url, path)
def mysql_auth(self):
'''Gets the authentication string for MySQL.'''
return util.database_utils.authentication(
config_file=self.options.mysql_config_file,
username=self.options.username, password=self.options.password)
def eval_script(self, script):
'''Returns the evaluation of the JavaScript expression |script|'''
return self.browser.execute_script('return (%s);' % script)
def assert_script(self, script):
'''Asserts that evaluating the JavaScript |script| returns true.'''
assert self.browser.execute_script('return !!(%s);' % script), \
'Evaluation of `%s` returned false' % script
def assert_script_equal(self, script, value):
'''Asserts that evaluating the JavaScript |script| returns true.'''
assert self.eval_script(script) == value, script
@contextlib.contextmanager
def page_transition(self, wait_for_ajax=True, target_url=None):
'''Waits for a page transition to finish.'''
html_node = self.browser.find_element_by_tag_name('html')
prev_url = self.browser.current_url
logging.debug('Waiting for a page transition on %s', prev_url)
yield
self.wait.until(EC.staleness_of(html_node))
logging.debug('New URL: %s', self.browser.current_url)
if target_url:
self.wait.until(EC.url_to_be(target_url))
logging.debug('Target URL: %s', self.browser.current_url)
if wait_for_ajax:
self._wait_for_page_loaded()
def _wait_for_page_loaded(self):
'''Waits for the page to be loaded.'''
try:
def _is_page_loaded(*_):
return self.browser.execute_script(
'return document.readyState;') == 'complete'
if _is_page_loaded():
return
logging.debug('Waiting for the page to finish loading...')
self.wait.until(_is_page_loaded)
logging.debug('Page loaded')
except TimeoutException as ex:
raise Exception('document ready state still %s' %
self.browser.execute_script(
'return document.readyState;')) from ex
t0 = time.time()
try:
def _is_jquery_done(*_):
return self.browser.execute_script(
'return jQuery.active;') == 0
            logging.debug('Waiting for all the pending AJAX calls to finish...')
self.wait.until(_is_jquery_done)
logging.debug('AJAX calls done.')
except TimeoutException as ex:
raise Exception('%d AJAX calls still active after %f s' %
(self.browser.execute_script(
'return jQuery.active;'),
time.time() - t0)) from ex
def typeahead_helper(self, parent_xpath, value, select_suggestion=True):
'''Helper to interact with Typeahead elements.'''
tt_input = self.wait.until(
EC.visibility_of_element_located(
(By.XPATH,
'//%s//input[contains(@class, "tt-input")]' % parent_xpath)))
tt_input.click()
tt_input.send_keys(value)
if not select_suggestion:
return
self.wait.until(
EC.element_to_be_clickable(
(By.XPATH,
'//%s//div[@data-value = "%s"]' %
(parent_xpath, value)))).click()
def typeahead_helper_v2(self, parent_selector, value):
'''Helper to interact with Typeahead elements.'''
tt_input = self.wait.until(
EC.visibility_of_element_located(
(By.CSS_SELECTOR,
'%s .tags-input input[type="text"]' % parent_selector)))
tt_input.click()
tt_input.send_keys(value)
self.wait.until(
EC.element_to_be_clickable(
(By.CSS_SELECTOR,
'%s ul.typeahead-dropdown li:first-of-type' %
(parent_selector)))).click()
def send_keys(self, # pylint: disable=no-self-use
element: WebElement,
value: str,
retries: int = 10) -> None:
'''Helper to _really_ send keys to an element.
        For some yet unexplained reason, when running in non-headless mode the
        interactions with text elements are not always registered by the
        browser.
This causes input elements to remain empty even after sending the keys.
This method sends the keys and then ensures that the value of the
element is the expected string, retrying if necessary.
'''
for _ in range(retries):
element.clear()
element.send_keys(value)
if element.get_attribute('value') == value:
return
logging.error('Failed to send keys to the element')
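    # Usage sketch (the element lookup is illustrative, not from this class):
    #
    #     driver.send_keys(
    #         driver.browser.find_element_by_name('title'), 'Problem title')
    #
    # This keeps retrying until the input actually holds 'Problem title'.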
@contextlib.contextmanager
def login_user(self):
'''Logs in as a user, and logs out when out of scope.'''
with self.login(self.user_username, 'user'):
yield
@contextlib.contextmanager
def login_admin(self):
'''Logs in as an admin, and logs out when out of scope.'''
with self.login(self.admin_username, 'omegaup'):
yield
@contextlib.contextmanager
def login(self, username, password, is_main_user_identity=True):
'''Logs in as :username, and logs out when out of scope.'''
# Home page
logging.debug('Logging in as %s...', username)
home_page_url = self.url('/')
self.browser.get(home_page_url)
self._wait_for_page_loaded()
self.wait.until(
EC.element_to_be_clickable(
(By.XPATH,
'//a[starts-with(@href, "/login/")]'))).click()
# Login screen
self.wait.until(lambda _: self.browser.current_url != home_page_url)
self._wait_for_page_loaded()
self.browser.find_element_by_name('login_username').send_keys(username)
self.browser.find_element_by_name('login_password').send_keys(password)
with self.page_transition():
self.browser.find_element_by_name('login').click()
if is_main_user_identity:
self.wait.until(
EC.element_to_be_clickable(
(By.CSS_SELECTOR, 'button[aria-label="Close"]'))).click()
try:
yield
        except:  # noqa: bare-except
            self.screenshot()
            raise
finally:
# Wait until there are no more pending requests to avoid races
# where those requests return 401. Navigate to a blank page just
# for good measure and to enforce that there are two URL changes.
self._wait_for_page_loaded()
with self.page_transition():
self.browser.get(self.url(_BLANK))
with self.page_transition(target_url=home_page_url):
self.browser.get(self.url('/logout/?redirect=/'))
assert self.browser.current_url == home_page_url, (
'Invalid URL redirect. Expected %s, got %s' % (
home_page_url, self.browser.current_url))
@util.no_javascript_errors()
@util.annotate
def register_user(self, user, passw):
'''Creates user :user and logs out when out of scope.'''
# Home page
home_page_url = self.url('/')
with self.page_transition():
self.browser.get(self.url(_BLANK))
with self.page_transition():
self.browser.get(home_page_url)
with self.page_transition():
self.wait.until(
EC.element_to_be_clickable(
(By.XPATH,
'//a[contains(@href, "/login/")]'))).click()
# Login screen
self.browser.find_element_by_name('reg_username').send_keys(user)
self.browser.find_element_by_name('reg_email').send_keys(
'email_%s@localhost.localdomain' % user)
self.browser.find_element_by_name('reg_password').send_keys(passw)
self.browser.find_element_by_name(
'reg_password_confirmation').send_keys(passw)
with self.page_transition():
self.browser.find_element_by_name('sign_up').click()
# Enable experiment
user_id = util.database_utils.mysql(
('''
SELECT
`u`.`user_id`
FROM
`Users` `u`
INNER JOIN
`Identities` `i`
ON
`u`.`main_identity_id` = `i`.`identity_id`
WHERE
`i`.`username` = '%s';
''') % (user),
dbname='omegaup', auth=self.mysql_auth())
self.enable_experiment_identities_to_user(user_id)
# Home screen
with self.page_transition():
self.browser.get(self.url(_BLANK))
with self.page_transition(target_url=home_page_url):
self.browser.get(self.url('/logout/?redirect=/'))
assert self.browser.current_url == home_page_url, (
'Invalid URL redirect. Expected %s, got %s' % (
home_page_url, self.browser.current_url))
def annotate(self, # pylint: disable=no-self-use
message: str,
level=logging.INFO) -> None:
'''Add an annotation to the run's log.'''
logging.log(level, message)
def update_run_score(self, run_id, verdict, score):
'''Set verdict and score of specified run'''
util.database_utils.mysql(
('''
UPDATE
`Runs`
SET
`score` = %s,
`contest_score` = %s,
`verdict` = '%s',
`status` = 'ready'
WHERE
`run_id` = %s;
''') % (str(score), str(score * 100), verdict, str(run_id)),
dbname='omegaup', auth=self.mysql_auth())
def update_score_in_course(self, problem_alias, assignment_alias,
verdict='AC', score=1):
'''Set verdict and score of latest run in a course'''
run_id = util.database_utils.mysql(
('''
SELECT
MAX(`r`.`run_id`)
FROM
`Submissions` AS `s`
INNER JOIN
`Runs` AS `r` ON
`r`.`run_id` = `s`.`current_run_id`
INNER JOIN
`Problems` AS `p` ON
`p`.`problem_id` = `s`.`problem_id`
INNER JOIN
`Problemsets` AS `ps` ON
`ps`.`problemset_id` = `s`.`problemset_id`
INNER JOIN
`Assignments` AS `a` ON `a`.`acl_id` = `ps`.`acl_id`
WHERE
`p`.`alias` = '%s'
AND `a`.`alias` = '%s';
''') % (problem_alias, assignment_alias),
dbname='omegaup', auth=self.mysql_auth())
self.update_run_score(int(run_id.strip()), verdict, score)
def update_score_in_contest(self, problem_alias, contest_alias,
verdict='AC', score=1):
'''Set verdict and score of latest run in a contest'''
run_id = util.database_utils.mysql(
('''
SELECT
MAX(`r`.`run_id`)
FROM
`Submissions` AS `s`
INNER JOIN
`Runs` AS `r` ON
`r`.`run_id` = `s`.`current_run_id`
INNER JOIN
`Problems` AS `p` ON
`p`.`problem_id` = `s`.`problem_id`
INNER JOIN
`Problemsets` AS `ps` ON
`ps`.`problemset_id` = `s`.`problemset_id`
INNER JOIN
`Contests` AS `c` ON `c`.`acl_id` = `ps`.`acl_id`
WHERE
`p`.`alias` = '%s'
AND `c`.`alias` = '%s';
''') % (problem_alias, contest_alias),
dbname='omegaup', auth=self.mysql_auth())
self.update_run_score(int(run_id.strip()), verdict, score)
def update_score(self, problem_alias, verdict='AC', score=1):
        '''Set verdict and score of the latest run that doesn't belong to a problemset.'''
run_id = util.database_utils.mysql(
('''
SELECT
MAX(`r`.`run_id`)
FROM
`Submissions` AS `s`
INNER JOIN
`Runs` AS `r` ON
`r`.`run_id` = `s`.`current_run_id`
INNER JOIN
`Problems` AS `p` ON
`p`.`problem_id` = `s`.`problem_id`
WHERE
`p`.`alias` = '%s';
''') % (problem_alias),
dbname='omegaup', auth=self.mysql_auth())
self.update_run_score(int(run_id.strip()), verdict, score)
def create_user(self, admin=False):
'''Create a user, with optional admin privileges.'''
if admin:
username = 'admin_%s' % self.generate_id()
# password = 'omegaup'
password = (
'$2a$08$tyE7x/yxOZ1ltM7YAuFZ8OK/56c9Fsr/XDqgPe22IkOORY2kAAg2a')
else:
username = 'user_%s' % self.generate_id()
# password = 'user'
password = (
'$2a$08$wxJh5voFPGuP8fUEthTSvutdb1OaWOa8ZCFQOuU/ZxcsOuHGw0Cqy')
# Add the user directly to the database to make this fast and avoid UI
# flake.
identity_id = util.database_utils.mysql(
('''
INSERT INTO
Identities(`username`, `password`, `name`)
VALUES
('%s', '%s', '%s');
SELECT LAST_INSERT_ID();
''') % (username, password, username),
dbname='omegaup', auth=self.mysql_auth())
user_id = util.database_utils.mysql(
('''
INSERT INTO
Users(`main_identity_id`, `verified`)
VALUES
(%s, 1);
SELECT LAST_INSERT_ID();
''') % (identity_id),
dbname='omegaup', auth=self.mysql_auth())
util.database_utils.mysql(
('''
UPDATE
Identities
SET
user_id = %s
WHERE
identity_id = %s;
''') % (user_id, identity_id),
dbname='omegaup', auth=self.mysql_auth())
# Enable experiment
self.enable_experiment_identities_to_user(user_id)
if admin:
util.database_utils.mysql(
('''
INSERT INTO
User_Roles(`user_id`, `role_id`, `acl_id`)
VALUES
(%s, 1, 1);
''') % (user_id,),
dbname='omegaup', auth=self.mysql_auth())
return username
def enable_experiment_identities_to_user(self, user_id):
        '''Enable the identities experiment so the user can use the
        identity-refactor features.
        '''
util.database_utils.mysql(
('''
INSERT INTO
Users_Experiments(`user_id`, `experiment`)
VALUES
('%s', 'identities');
''') % (user_id),
dbname='omegaup', auth=self.mysql_auth())
def screenshot(self, name: Optional[str] = None) -> None:
'''Takes a screenshot.'''
results_dir = os.path.join(_DIRNAME, 'results')
os.makedirs(results_dir, exist_ok=True)
idx = self._screenshot_index
self._screenshot_index += 1
self.browser.get_screenshot_as_file(
os.path.join(results_dir,
f'webdriver_{name or self.test_name}.{idx:03}.png'))
@pytest.hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
'''Takes a screenshot and grabs console logs on test failures.'''
global _SUCCESS # pylint: disable=global-statement
current_driver: Optional[Driver] = pyfuncitem.funcargs.get('driver')
if current_driver:
current_driver.test_name = pyfuncitem.name
outcome = yield
if not outcome.excinfo:
return
_SUCCESS = False
if not current_driver:
return
try:
try:
logs = current_driver.browser.get_log('browser')
except: # noqa: bare-except
# geckodriver does not support getting logs:
# https://github.com/mozilla/geckodriver/issues/284
logs = []
results_dir = os.path.join(_DIRNAME, 'results')
os.makedirs(results_dir, exist_ok=True)
current_driver.screenshot(pyfuncitem.name)
logpath = os.path.join(results_dir,
'webdriver_%s.log' % pyfuncitem.name)
with open(logpath, 'w') as logfile:
json.dump(logs, logfile, indent=2)
except Exception as ex: # pylint: disable=broad-except
print(ex)
def pytest_addoption(parser):
'''Allow configuration of test invocation.'''
parser.addoption('--browser', action='append', type=str, dest='browsers',
help='The browsers that the test will run against')
parser.addoption('--url', default='http://localhost:8001/',
help='The URL that the test will be run against')
parser.addoption('--disable-headless', action='store_false',
dest='headless', help='Show the browser window')
parser.addoption('--mysql-config-file',
default=util.database_utils.default_config_file(),
help='.my.cnf file that stores credentials')
parser.addoption('--username', default='root', help='MySQL root username')
parser.addoption('--password', default='omegaup', help='MySQL password')
def pytest_generate_tests(metafunc):
'''Parameterize the tests with the browsers.'''
if not metafunc.config.option.browsers:
metafunc.config.option.browsers = ['chrome', 'firefox']
if 'driver' in metafunc.fixturenames:
metafunc.parametrize('browser_name', metafunc.config.option.browsers,
scope='session')
def _get_browser(request, browser_name):
'''Gets a browser object from the request parameters.'''
if browser_name == 'chrome':
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option(
'prefs', {'intl.accept_languages': 'en_US'})
chrome_options.add_argument('--lang=en-US')
if request.config.option.headless:
chrome_options.add_argument('--headless')
chrome_options.set_capability('loggingPrefs', {'browser': 'ALL'})
chrome_browser = webdriver.Chrome(
options=chrome_options)
chrome_browser.set_window_size(*_WINDOW_SIZE)
return chrome_browser
firefox_options = webdriver.firefox.options.Options()
firefox_options.set_capability('marionette', True)
firefox_options.set_capability('loggingPrefs', {'browser': 'ALL'})
firefox_options.profile = webdriver.FirefoxProfile()
firefox_options.profile.set_preference(
'webdriver.log.file', '/tmp/firefox_console')
firefox_options.headless = request.config.option.headless
firefox_browser = webdriver.Firefox(
options=firefox_options)
firefox_browser.set_window_size(*_WINDOW_SIZE)
return firefox_browser
@pytest.yield_fixture(scope='session')
def driver(request, browser_name):
'''Run tests using the selenium webdriver.'''
try:
browser = _get_browser(request, browser_name)
browser.implicitly_wait(_DEFAULT_TIMEOUT)
if browser_name != 'firefox':
# Ensure that getting browser logs is supported in non-Firefox
# browsers.
assert isinstance(browser.get_log('browser'), list)
wait = WebDriverWait(browser, _DEFAULT_TIMEOUT,
poll_frequency=0.1)
try:
yield Driver(browser, browser_name, wait,
request.config.option.url,
os.environ.get('PYTEST_XDIST_WORKER', 'w0'),
request.config.option)
finally:
browser.quit()
except: # noqa: bare-except
logging.exception('Failed to initialize')
raise
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating clones for models
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_model_steps as model_create
from . import create_ensemble_steps as ensemble_create
from . import create_linear_steps as linear_create
from . import create_cluster_steps as cluster_create
from . import create_lda_steps as topic_create
from . import create_anomaly_steps as anomaly_create
from . import create_association_steps as association_create
from . import create_time_series_steps as time_create
from . import create_pca_steps as pca_create
class TestCloning(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a clone from a model:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a model
And I wait until the model is ready less than <time_3> secs
Then the origin model is the previous model
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '10', '10', '10']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
model_create.i_create_a_model(self)
model_create.the_model_is_finished_in_less_than(self, example[3])
model = world.model["resource"]
model_create.clone_model(self, model)
model_create.the_cloned_model_is(self, model)
def test_scenario2(self):
"""
            Scenario: Successfully creating a clone from an ensemble:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an ensemble
And I wait until the ensemble is ready less than <time_3> secs
Then the origin ensemble is the previous ensemble
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris.csv', '10', '10', '30']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
ensemble_create.i_create_an_ensemble(self)
ensemble_create.the_ensemble_is_finished_in_less_than(
self, example[3])
ensemble = world.ensemble["resource"]
ensemble_create.clone_ensemble(self, ensemble)
ensemble_create.the_cloned_ensemble_is(self, ensemble)
def test_scenario3(self):
"""
Scenario: Successfully creating a clone from a deepnet:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a quick deepnet
And I wait until the deepnet is ready less than <time_3> secs
Then the origin deepnet is the previous deepnet
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario3.__doc__)
examples = [
['data/iris.csv', '10', '10', '100']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
model_create.i_create_a_quick_deepnet(self)
model_create.the_deepnet_is_finished_in_less_than(
self, example[3])
deepnet = world.deepnet["resource"]
model_create.clone_deepnet(self, deepnet)
model_create.the_cloned_deepnet_is(self, deepnet)
def test_scenario4(self):
"""
Scenario: Successfully creating a clone from a logistic regression:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a logistic regression
And I wait until the logistic regression is ready less than <time_3> secs
Then the origin logistic regression is the previous logistic regression
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario4.__doc__)
examples = [
['data/iris.csv', '10', '10', '30']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
model_create.i_create_a_logistic_model(self)
model_create.the_logistic_model_is_finished_in_less_than(
self, example[3])
logistic_regression = world.logistic_regression["resource"]
model_create.clone_logistic_regression(self, logistic_regression)
model_create.the_cloned_logistic_regression_is(
self, logistic_regression)
def test_scenario5(self):
"""
Scenario: Successfully creating a clone from a linear regression:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a linear regression
And I wait until the linear regression is ready less than <time_3> secs
Then the origin linear regression is the previous linear regression
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario5.__doc__)
examples = [
['data/iris.csv', '10', '10', '30']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
linear_create.i_create_a_linear_regression_from_dataset(self)
linear_create.the_linear_regression_is_finished_in_less_than(
self, example[3])
linear_regression = world.linear_regression["resource"]
linear_create.clone_linear_regression(self, linear_regression)
linear_create.the_cloned_linear_regression_is(
self, linear_regression)
def test_scenario6(self):
"""
Scenario: Successfully creating a clone from a cluster:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a cluster
And I wait until the cluster is ready less than <time_3> secs
Then the origin cluster is the previous cluster
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario6.__doc__)
examples = [
['data/iris.csv', '10', '10', '30']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
cluster_create.i_create_a_cluster(self)
cluster_create.the_cluster_is_finished_in_less_than(
self, example[3])
cluster = world.cluster["resource"]
cluster_create.clone_cluster(self, cluster)
cluster_create.the_cloned_cluster_is(
self, cluster)
def test_scenario7(self):
"""
Scenario: Successfully creating a clone from a topic model:
Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I update the source with "<source_conf>"
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a topic model
                And I wait until the topic model is ready less than <time_3> secs
                Then the origin topic model is the previous topic model
                Examples:
                | data | time_1 | time_2 | time_3 | source_conf |
                | ../data/spam.csv | 10 | 10 | 100 | {"fields": {"000001": {"optype": "text", ...}}} |
"""
print(self.test_scenario7.__doc__)
examples = [
['data/spam.csv', '10', '10', '100', '{"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": true, "stem_words": true, "use_stopwords": false, "language": "en"}}}}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
source_create.i_update_source_with(self, example[4])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
topic_create.i_create_a_topic_model(self)
topic_create.the_topic_model_is_finished_in_less_than(
self, example[3])
topic_model = world.topic_model["resource"]
topic_create.clone_topic_model(self, topic_model)
topic_create.the_cloned_topic_model_is(
self, topic_model)
def test_scenario8(self):
"""
Scenario: Successfully creating a clone from an anomaly detector:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less than <time_3> secs
Then the origin anomaly detector is the previous anomaly detector
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario8.__doc__)
examples = [
['data/iris.csv', '10', '10', '100']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
anomaly_create.i_create_an_anomaly(self)
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example[3])
anomaly = world.anomaly["resource"]
anomaly_create.clone_anomaly(self, anomaly)
anomaly_create.the_cloned_anomaly_is(
self, anomaly)
def test_scenario9(self):
"""
Scenario: Successfully creating a clone from an association:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an association
And I wait until the association is ready less than <time_3> secs
Then the origin association is the previous association
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario9.__doc__)
examples = [
['data/iris.csv', '10', '10', '100']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
association_create.i_create_an_association_from_dataset(self)
association_create.the_association_is_finished_in_less_than(
self, example[3])
association = world.association["resource"]
association_create.clone_association(self, association)
association_create.the_cloned_association_is(
self, association)
def test_scenario10(self):
"""
Scenario: Successfully creating a clone from a time series:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a time series
And I wait until the time series is ready less than <time_3> secs
Then the origin time series is the previous time series
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario10.__doc__)
examples = [
['data/iris.csv', '10', '10', '100']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
time_create.i_create_a_time_series(self)
time_create.the_time_series_is_finished_in_less_than(
self, example[3])
time_series = world.time_series["resource"]
time_create.clone_time_series(self, time_series)
time_create.the_cloned_time_series_is(
self, time_series)
def test_scenario11(self):
"""
Scenario: Successfully creating a clone from a pca:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a pca
And I wait until the pca is ready less than <time_3> secs
Then the origin pca is the previous pca
Examples:
| data | time_1 | time_2 | time_3 |
| ../data/iris.csv | 10 | 10 | 10 |
"""
print(self.test_scenario11.__doc__)
examples = [
['data/iris.csv', '10', '10', '100']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example[2])
pca_create.i_create_a_pca(self)
pca_create.the_pca_is_finished_in_less_than(
self, example[3])
pca = world.pca["resource"]
pca_create.clone_pca(self, pca)
pca_create.the_cloned_pca_is(self, pca)
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import datetime
import errno
import inspect
import os
import re
import sys
import threading
import types
import enum
from oslo_serialization import jsonutils
from oslo_serialization import msgpackutils
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import reflection
import six
from six.moves import map as compat_map
from six.moves import range as compat_range
from taskflow.types import failure
from taskflow.types import notifier
from taskflow.utils import deprecation
NUMERIC_TYPES = six.integer_types + (float,)
# NOTE(imelnikov): regular expression to get scheme from URI,
# see RFC 3986 section 3.1
_SCHEME_REGEX = re.compile(r"^([A-Za-z][A-Za-z0-9+.-]*):")
class StrEnum(str, enum.Enum):
"""An enumeration that is also a string and can be compared to strings."""
def __new__(cls, *args, **kwargs):
for a in args:
if not isinstance(a, str):
raise TypeError("Enumeration '%s' (%s) is not"
" a string" % (a, type(a).__name__))
return super(StrEnum, cls).__new__(cls, *args, **kwargs)
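# Usage sketch for ``StrEnum`` (hypothetical enum, shown only for
# illustration):
#
#   >>> class Color(StrEnum):
#   ...     RED = 'red'
#   >>> Color.RED == 'red'
#   True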
def match_type(obj, matchers):
"""Matches a given object using the given matchers list/iterable.
NOTE(harlowja): each element of the provided list/iterable must be
tuple of (valid types, result).
    Returns the result (the second element of the matching tuple) if a type
    match occurs, otherwise None if no matches are found.
"""
for (match_types, match_result) in matchers:
if isinstance(obj, match_types):
return match_result
else:
return None
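# Example for ``match_type`` (the matchers list is illustrative):
#
#   >>> matchers = [((int, float), 'number'), ((str,), 'text')]
#   >>> match_type(3.5, matchers)
#   'number'
#   >>> match_type(object(), matchers) is None
#   True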
def countdown_iter(start_at, decr=1):
"""Generator that decrements after each generation until <= zero.
NOTE(harlowja): we can likely remove this when we can use an
``itertools.count`` that takes a step (on py2.6 which we still support
that step parameter does **not** exist and therefore can't be used).
"""
if decr <= 0:
raise ValueError("Decrement value must be greater"
" than zero and not %s" % decr)
while start_at > 0:
yield start_at
start_at -= decr
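# For example, list(countdown_iter(5, 2)) yields [5, 3, 1].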
def reverse_enumerate(items):
"""Like reversed(enumerate(items)) but with less copying/cloning..."""
for i in countdown_iter(len(items)):
yield i - 1, items[i - 1]
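# For example, list(reverse_enumerate(['a', 'b', 'c'])) yields
# [(2, 'c'), (1, 'b'), (0, 'a')].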
def merge_uri(uri, conf):
"""Merges a parsed uri into the given configuration dictionary.
Merges the username, password, hostname, port, and query parameters of
a URI into the given configuration dictionary (it does **not** overwrite
existing configuration keys if they already exist) and returns the merged
configuration.
NOTE(harlowja): does not merge the path, scheme or fragment.
"""
uri_port = uri.port
specials = [
('username', uri.username, lambda v: bool(v)),
('password', uri.password, lambda v: bool(v)),
# NOTE(harlowja): A different check function is used since 0 is
# false (when bool(v) is applied), and that is a valid port...
('port', uri_port, lambda v: v is not None),
]
hostname = uri.hostname
if hostname:
if uri_port is not None:
hostname += ":%s" % (uri_port)
specials.append(('hostname', hostname, lambda v: bool(v)))
for (k, v, is_not_empty_value_func) in specials:
if is_not_empty_value_func(v):
conf.setdefault(k, v)
for (k, v) in six.iteritems(uri.params()):
conf.setdefault(k, v)
return conf
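# Usage sketch for ``merge_uri`` (URI and existing keys are illustrative):
# merging parse_uri("mysql://fred:secret@example.com:3306/db") into
# {'timeout': 10} would add ``username``, ``password``, ``port`` and
# ``hostname`` (as 'example.com:3306'), while the existing ``timeout`` key is
# left untouched.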
def find_subclasses(locations, base_cls, exclude_hidden=True):
"""Finds subclass types in the given locations.
    This examines the given locations for types which are subclasses of
the base class type provided and returns the found subclasses (or fails
with exceptions if this introspection can not be accomplished).
If a string is provided as one of the locations it will be imported and
examined if it is a subclass of the base class. If a module is given,
all of its members will be examined for attributes which are subclasses of
the base class. If a type itself is given it will be examined for being a
subclass of the base class.
"""
derived = set()
for item in locations:
module = None
if isinstance(item, six.string_types):
try:
pkg, cls = item.split(':')
except ValueError:
module = importutils.import_module(item)
else:
obj = importutils.import_class('%s.%s' % (pkg, cls))
if not reflection.is_subclass(obj, base_cls):
raise TypeError("Object '%s' (%s) is not a '%s' subclass"
% (item, type(item), base_cls))
derived.add(obj)
elif isinstance(item, types.ModuleType):
module = item
elif reflection.is_subclass(item, base_cls):
derived.add(item)
else:
raise TypeError("Object '%s' (%s) is an unexpected type" %
(item, type(item)))
# If it's a module derive objects from it if we can.
if module is not None:
for (name, obj) in inspect.getmembers(module):
if name.startswith("_") and exclude_hidden:
continue
if reflection.is_subclass(obj, base_cls):
derived.add(obj)
return derived
def pick_first_not_none(*values):
"""Returns first of values that is *not* None (or None if all are/were)."""
for val in values:
if val is not None:
return val
return None
def parse_uri(uri):
"""Parses a uri into its components."""
# Do some basic validation before continuing...
if not isinstance(uri, six.string_types):
raise TypeError("Can only parse string types to uri data, "
"and not '%s' (%s)" % (uri, type(uri)))
match = _SCHEME_REGEX.match(uri)
if not match:
raise ValueError("Uri '%s' does not start with a RFC 3986 compliant"
" scheme" % (uri))
return netutils.urlsplit(uri)
def look_for(haystack, needles, extractor=None):
"""Find items in haystack and returns matches found (in haystack order).
Given a list of items (the haystack) and a list of items to look for (the
needles) this will look for the needles in the haystack and returns
the found needles (if any). The ordering of the returned needles is in the
order they are located in the haystack.
Example input and output:
>>> from taskflow.utils import misc
>>> hay = [3, 2, 1]
>>> misc.look_for(hay, [1, 2])
[2, 1]
"""
if not haystack:
return []
if extractor is None:
extractor = lambda v: v
matches = []
for i, v in enumerate(needles):
try:
matches.append((haystack.index(extractor(v)), i))
except ValueError:
pass
if not matches:
return []
else:
return [needles[i] for (_hay_i, i) in sorted(matches)]
def disallow_when_frozen(excp_cls):
"""Frozen checking/raising method decorator."""
def decorator(f):
@six.wraps(f)
def wrapper(self, *args, **kwargs):
if self.frozen:
raise excp_cls()
else:
return f(self, *args, **kwargs)
return wrapper
return decorator
def clamp(value, minimum, maximum, on_clamped=None):
"""Clamps a value to ensure its >= minimum and <= maximum."""
if minimum > maximum:
raise ValueError("Provided minimum '%s' must be less than or equal to"
" the provided maximum '%s'" % (minimum, maximum))
if value > maximum:
value = maximum
if on_clamped is not None:
on_clamped()
if value < minimum:
value = minimum
if on_clamped is not None:
on_clamped()
return value
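# For example, clamp(15, 0, 10) returns 10 and, when provided, calls
# ``on_clamped`` exactly once.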
def fix_newlines(text, replacement=os.linesep):
"""Fixes text that *may* end with wrong nl by replacing with right nl."""
return replacement.join(text.splitlines())
def binary_encode(text, encoding='utf-8', errors='strict'):
"""Encodes a text string into a binary string using given encoding.
Does nothing if data is already a binary string (raises on unknown types).
"""
if isinstance(text, six.binary_type):
return text
else:
return encodeutils.safe_encode(text, encoding=encoding,
errors=errors)
def binary_decode(data, encoding='utf-8', errors='strict'):
"""Decodes a binary string into a text string using given encoding.
Does nothing if data is already a text string (raises on unknown types).
"""
if isinstance(data, six.text_type):
return data
else:
return encodeutils.safe_decode(data, incoming=encoding,
errors=errors)
def _check_decoded_type(data, root_types=(dict,)):
if root_types:
if not isinstance(root_types, tuple):
root_types = tuple(root_types)
if not isinstance(data, root_types):
if len(root_types) == 1:
root_type = root_types[0]
raise ValueError("Expected '%s' root type not '%s'"
% (root_type, type(data)))
else:
raise ValueError("Expected %s root types not '%s'"
% (list(root_types), type(data)))
return data
def decode_msgpack(raw_data, root_types=(dict,)):
"""Parse raw data to get decoded object.
    Decodes a msgpack encoded 'blob' from a given raw data binary string and
checks that the root type of that decoded object is in the allowed set of
types (by default a dict should be the root type).
"""
try:
data = msgpackutils.loads(raw_data)
except Exception as e:
# TODO(harlowja): fix this when msgpackutils exposes the msgpack
# exceptions so that we can avoid catching just exception...
raise ValueError("Expected msgpack decodable data: %s" % e)
else:
return _check_decoded_type(data, root_types=root_types)
def decode_json(raw_data, root_types=(dict,)):
"""Parse raw data to get decoded object.
Decodes a JSON encoded 'blob' from a given raw data binary string and
checks that the root type of that decoded object is in the allowed set of
types (by default a dict should be the root type).
"""
try:
data = jsonutils.loads(binary_decode(raw_data))
except UnicodeDecodeError as e:
raise ValueError("Expected UTF-8 decodable data: %s" % e)
except ValueError as e:
raise ValueError("Expected JSON decodable data: %s" % e)
else:
return _check_decoded_type(data, root_types=root_types)
class cachedproperty(object):
"""A *thread-safe* descriptor property that is only evaluated once.
This caching descriptor can be placed on instance methods to translate
those methods into properties that will be cached in the instance (avoiding
repeated attribute checking logic to do the equivalent).
NOTE(harlowja): by default the property that will be saved will be under
the decorated methods name prefixed with an underscore. For example if we
were to attach this descriptor to an instance method 'get_thing(self)' the
cached property would be stored under '_get_thing' in the self object
after the first call to 'get_thing' occurs.
"""
def __init__(self, fget):
self._lock = threading.RLock()
# If a name is provided (as an argument) then this will be the string
# to place the cached attribute under if not then it will be the
# function itself to be wrapped into a property.
if inspect.isfunction(fget):
self._fget = fget
self._attr_name = "_%s" % (fget.__name__)
self.__doc__ = getattr(fget, '__doc__', None)
else:
self._attr_name = fget
self._fget = None
self.__doc__ = None
def __call__(self, fget):
# If __init__ received a string then this will be the function to be
# wrapped as a property (if __init__ got a function then this will not
# be called).
self._fget = fget
self.__doc__ = getattr(fget, '__doc__', None)
return self
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def __get__(self, instance, owner):
if instance is None:
return self
# Quick check to see if this already has been made (before acquiring
# the lock). This is safe to do since we don't allow deletion after
# being created.
if hasattr(instance, self._attr_name):
return getattr(instance, self._attr_name)
else:
with self._lock:
try:
return getattr(instance, self._attr_name)
except AttributeError:
value = self._fget(instance)
setattr(instance, self._attr_name, value)
return value
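# Usage sketch for ``cachedproperty`` (class and method names are
# hypothetical):
#
#   class Widget(object):
#       @cachedproperty
#       def parts(self):
#           return self._load_parts()   # evaluated once, then cached
#
# After the first access, the value lives on the instance as ``_parts``.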
def millis_to_datetime(milliseconds):
"""Converts number of milliseconds (from epoch) into a datetime object."""
return datetime.datetime.fromtimestamp(float(milliseconds) / 1000)
def get_version_string(obj):
"""Gets a object's version as a string.
Returns string representation of object's version taken from
its 'version' attribute, or None if object does not have such
attribute or its version is None.
"""
obj_version = getattr(obj, 'version', None)
if isinstance(obj_version, (list, tuple)):
obj_version = '.'.join(str(item) for item in obj_version)
if obj_version is not None and not isinstance(obj_version,
six.string_types):
obj_version = str(obj_version)
return obj_version
def sequence_minus(seq1, seq2):
"""Calculate difference of two sequences.
Result contains the elements from first sequence that are not
present in second sequence, in original order. Works even
if sequence elements are not hashable.
"""
result = list(seq1)
for item in seq2:
try:
result.remove(item)
except ValueError:
pass
return result
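# For example, sequence_minus([1, 2, 2, 3], [2]) returns [1, 2, 3]; only the
# first matching occurrence of each item in ``seq2`` is removed.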
def get_duplicate_keys(iterable, key=None):
    """Returns the set of items (or keys derived via ``key``) seen more than once."""
    if key is not None:
        iterable = compat_map(key, iterable)
keys = set()
duplicates = set()
for item in iterable:
if item in keys:
duplicates.add(item)
keys.add(item)
return duplicates
class ExponentialBackoff(object):
"""An iterable object that will yield back an exponential delay sequence.
    This object provides a configurable exponent, count of numbers
to generate, and a maximum number that will be returned. This object may
also be iterated over multiple times (yielding the same sequence each
time).
"""
def __init__(self, count, exponent=2, max_backoff=3600):
self.count = max(0, int(count))
self.exponent = exponent
self.max_backoff = max(0, int(max_backoff))
def __iter__(self):
        if self.count <= 0:
            # NOTE: return (rather than raising StopIteration) so that this
            # generator stays valid under PEP 479 (Python 3.7+).
            return
for i in compat_range(0, self.count):
yield min(self.exponent ** i, self.max_backoff)
def __str__(self):
return "ExponentialBackoff: %s" % ([str(v) for v in self])
def as_int(obj, quiet=False):
"""Converts an arbitrary value into a integer."""
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError("Can not translate '%s' (%s) to an integer"
% (obj, type(obj)))
return obj
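# For example, as_int("2") == 2, as_int("2.5") == 2, and
# as_int("oops", quiet=True) returns "oops" unchanged.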
# Taken from oslo-incubator file-utils but since that module pulls in a large
# amount of other files it does not seem so useful to include that full
# module just for this function.
def ensure_tree(path):
"""Create a directory (and any ancestor directories required).
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
Failure = deprecation.moved_proxy_class(failure.Failure,
'Failure', __name__,
version="0.6", removal_version="2.0")
Notifier = deprecation.moved_proxy_class(notifier.Notifier,
'Notifier', __name__,
version="0.6", removal_version="2.0")
@contextlib.contextmanager
def capture_failure():
"""Captures the occurring exception and provides a failure object back.
This will save the current exception information and yield back a
failure object for the caller to use (it will raise a runtime error if
no active exception is being handled).
This is useful since in some cases the exception context can be cleared,
resulting in None being attempted to be saved after an exception handler is
    run. This can happen when eventlet switches greenthreads, or when code in
    an exception handler raises and then catches another exception. In both
    cases the exception context will be cleared.
To work around this, we save the exception state, yield a failure and
then run other code.
For example::
>>> from taskflow.utils import misc
>>>
>>> def cleanup():
... pass
...
>>>
>>> def save_failure(f):
... print("Saving %s" % f)
...
>>>
>>> try:
... raise IOError("Broken")
... except Exception:
... with misc.capture_failure() as fail:
... print("Activating cleanup")
... cleanup()
... save_failure(fail)
...
Activating cleanup
Saving Failure: IOError: Broken
"""
exc_info = sys.exc_info()
if not any(exc_info):
raise RuntimeError("No active exception is being handled")
else:
yield failure.Failure(exc_info=exc_info)
def is_iterable(obj):
"""Tests an object to to determine whether it is iterable.
This function will test the specified object to determine whether it is
iterable. String types (both ``str`` and ``unicode``) are ignored and will
return False.
:param obj: object to be tested for iterable
:return: True if object is iterable and is not a string
"""
return (not isinstance(obj, six.string_types) and
isinstance(obj, collections.Iterable))
|
|
__all__ = [
'recvfile',
]
import contextlib
import http.client
import logging
import re
import socket
from g1.asyncs.bases import adapters
from g1.bases import loggings
from g1.bases import pools
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
_CHUNK_SIZE = 8192
_BUFFER_POOL = pools.TimeoutPool(
pool_size=128,
allocate=lambda: bytearray(_CHUNK_SIZE),
release=lambda _: None,
)
async def recvfile(response, file):
"""Receive response body into a file.
    The caller must set ``stream`` to true when making the request.
DANGER! This breaks the multiple levels of encapsulation, from
requests.Response all the way down to http.client.HTTPResponse.
As a result, the response object is most likely unusable after a
recvfile call, and you should probably close it immediately.
"""
# requests sets _content to False initially.
ASSERT.is_(response._content, False)
ASSERT.false(response._content_consumed)
urllib3_response = ASSERT.not_none(response.raw)
chunked = urllib3_response.chunked
httplib_response = ASSERT.isinstance(
urllib3_response._fp, http.client.HTTPResponse
)
ASSERT.false(httplib_response.closed)
sock = ASSERT.isinstance(httplib_response.fp.raw._sock, socket.socket)
output = DecoderChain(file)
if chunked:
chunk_decoder = ChunkDecoder()
output.add(chunk_decoder)
num_to_read = 0
eof = lambda: chunk_decoder.eof
else:
num_to_read = ASSERT.greater(
ASSERT.not_none(httplib_response.length), 0
)
eof = lambda: num_to_read <= 0
# Use urllib3's decoder code.
urllib3_response._init_decoder()
if urllib3_response._decoder is not None:
output.add(ContentDecoder(urllib3_response._decoder))
with contextlib.ExitStack() as stack:
src = adapters.FileAdapter(httplib_response.fp)
stack.callback(src.disown)
sock.setblocking(False)
stack.callback(sock.setblocking, True)
buffer = memoryview(stack.enter_context(_BUFFER_POOL.using()))
while not eof():
if chunked:
                # TODO: If the server sends more data at the end (such as the
                # response to the next request), recvfile might currently read
                # it and then err out.  Maybe recvfile should check for this
                # and not read more than it should?
num_read = await src.readinto1(buffer)
else:
num_read = await src.readinto1(
buffer[:min(num_to_read, _CHUNK_SIZE)]
)
if num_read == 0:
break
output.write(buffer[:num_read])
num_to_read -= num_read
output.flush()
# Sanity check.
if not chunked:
ASSERT.equal(num_to_read, 0)
# Trick requests to release the connection back to the connection
# pool, rather than closing/discarding it.
response._content_consumed = True
# http.client.HTTPConnection tracks the last response; so you have
# to close it to make the connection object useable again.
httplib_response.close()
# Close the response for the caller since response is not useable
# after recvfile.
response.close()
loggings.ONCE_PER(
1000, LOG.info, 'buffer pool stats: %r', _BUFFER_POOL.get_stats()
)
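# Usage sketch (hedged; session/url/path are the caller's own and the call
# must run inside a coroutine under the g1 async kernel):
#
#   response = session.get(url, stream=True)
#   with open(path, 'wb') as sink:
#       await recvfile(response, sink)
#
# ``recvfile`` closes ``response`` itself, so it should not be reused.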
class DecoderChain:
def __init__(self, file):
self._file = file
self._decoders = []
def add(self, decoder):
self._decoders.append(decoder)
def write(self, data):
pieces = [data]
for decoder in self._decoders:
pieces = decoder.decode(pieces)
self._write(pieces)
def flush(self):
for i, decoder in enumerate(self._decoders):
pieces = decoder.flush()
for d in self._decoders[i + 1:]:
pieces = d.decode(pieces)
self._write(pieces)
def _write(self, pieces):
for data in pieces:
if data:
self._file.write(data)
class ChunkDecoder:
_CRLF_PATTERN = re.compile(br'\r\n')
def __init__(self):
self.eof = False
self._chunk_remaining = -2
# Buffer for residual chunk size data from the last `_decode`.
# It is fairly small for now because we do not expect big chunk
# parameter.
self._buffer = memoryview(bytearray(64))
self._pos = 0
def decode(self, pieces):
ASSERT.false(self.eof)
output = []
for data in pieces:
if data:
self._decode(data, output)
return output
def _decode(self, data, output):
def move(n):
"""Move ``n`` bytes from ``data`` to ``output``."""
nonlocal data
ASSERT.greater_or_equal(self._chunk_remaining, n)
output.append(data[:n])
data = data[n:]
self._chunk_remaining -= n
def expect(pattern):
"""Drop ``pattern`` prefix from ``data``."""
nonlocal data
n = min(len(pattern), len(data))
ASSERT.equal(pattern[:n], data[:n])
data = data[n:]
return n
while data:
if self._chunk_remaining > 0:
move(min(self._chunk_remaining, len(data)))
continue
if self._chunk_remaining == 0:
self._chunk_remaining -= expect(b'\r\n')
continue
if self._chunk_remaining == -1:
self._chunk_remaining -= expect(b'\n')
continue
match = self._CRLF_PATTERN.search(data)
if not match:
self._append(data)
match = self._CRLF_PATTERN.search(self._buffer[:self._pos])
if not match:
break
data = self._reset()
chunk_size = data[:match.start()]
if self._pos > 0:
self._append(chunk_size)
chunk_size = self._reset()
# TODO: Handle parameters (stuff after ';').
chunk_size = int(
bytes(chunk_size).split(b';', maxsplit=1)[0],
base=16,
)
if chunk_size == 0:
# TODO: Handle trailers.
self.eof = True
else:
ASSERT.false(self.eof)
data = data[match.end():]
self._chunk_remaining = chunk_size
if self.eof:
ASSERT.empty(data)
def _append(self, data):
end = ASSERT.less_or_equal(self._pos + len(data), len(self._buffer))
self._buffer[self._pos:end] = data
self._pos = end
def _reset(self):
data = self._buffer[:self._pos]
self._pos = 0
return data
def flush(self):
ASSERT.true(self.eof)
ASSERT.equal(self._chunk_remaining, -2)
return []
class ContentDecoder:
def __init__(self, urllib3_decoder):
self._decoder = urllib3_decoder
def decode(self, pieces):
return [self._decoder.decompress(data) for data in pieces if data]
def flush(self):
return [self._decoder.flush()]
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUIngressQOSPolicy(NURESTObject):
""" Represents a IngressQOSPolicy in the VSD
Notes:
A Tunnel Shaper QoS Policy is a policy that groups rate-limiting profiles, traffic directionality and classifiers to govern the rate of traffic being sent or received by an end-host or application.
"""
__rest_name__ = "ingressqospolicy"
__resource_name__ = "ingressqospolicies"
## Constants
CONST_QUEUE2_FORWARDING_CLASSES_H = "H"
CONST_QUEUE2_FORWARDING_CLASSES_NONE = "NONE"
CONST_QUEUE2_FORWARDING_CLASSES_A = "A"
CONST_QUEUE2_FORWARDING_CLASSES_B = "B"
CONST_QUEUE2_FORWARDING_CLASSES_C = "C"
CONST_QUEUE2_FORWARDING_CLASSES_D = "D"
CONST_QUEUE2_FORWARDING_CLASSES_E = "E"
CONST_QUEUE2_FORWARDING_CLASSES_F = "F"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_QUEUE4_FORWARDING_CLASSES_NONE = "NONE"
CONST_QUEUE1_FORWARDING_CLASSES_NONE = "NONE"
CONST_QUEUE3_FORWARDING_CLASSES_H = "H"
CONST_QUEUE3_FORWARDING_CLASSES_C = "C"
CONST_QUEUE3_FORWARDING_CLASSES_B = "B"
CONST_QUEUE3_FORWARDING_CLASSES_A = "A"
CONST_QUEUE3_FORWARDING_CLASSES_G = "G"
CONST_QUEUE3_FORWARDING_CLASSES_F = "F"
CONST_QUEUE3_FORWARDING_CLASSES_E = "E"
CONST_QUEUE3_FORWARDING_CLASSES_D = "D"
CONST_QUEUE1_FORWARDING_CLASSES_A = "A"
CONST_QUEUE1_FORWARDING_CLASSES_C = "C"
CONST_QUEUE1_FORWARDING_CLASSES_B = "B"
CONST_QUEUE1_FORWARDING_CLASSES_E = "E"
CONST_QUEUE1_FORWARDING_CLASSES_D = "D"
CONST_QUEUE1_FORWARDING_CLASSES_G = "G"
CONST_QUEUE1_FORWARDING_CLASSES_F = "F"
CONST_QUEUE1_FORWARDING_CLASSES_H = "H"
CONST_QUEUE4_FORWARDING_CLASSES_B = "B"
CONST_QUEUE4_FORWARDING_CLASSES_C = "C"
CONST_QUEUE4_FORWARDING_CLASSES_A = "A"
CONST_QUEUE4_FORWARDING_CLASSES_F = "F"
CONST_QUEUE4_FORWARDING_CLASSES_G = "G"
CONST_QUEUE4_FORWARDING_CLASSES_D = "D"
CONST_QUEUE4_FORWARDING_CLASSES_E = "E"
CONST_QUEUE4_FORWARDING_CLASSES_H = "H"
CONST_QUEUE2_FORWARDING_CLASSES_G = "G"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_QUEUE3_FORWARDING_CLASSES_NONE = "NONE"
def __init__(self, **kwargs):
""" Initializes a IngressQOSPolicy instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> ingressqospolicy = NUIngressQOSPolicy(id=u'xxxx-xxx-xxx-xxx', name=u'IngressQOSPolicy')
>>> ingressqospolicy = NUIngressQOSPolicy(data=my_dict)
"""
super(NUIngressQOSPolicy, self).__init__()
# Read/Write Attributes
self._name = None
self._parent_queue_associated_rate_limiter_id = None
self._last_updated_by = None
self._last_updated_date = None
self._description = None
self._embedded_metadata = None
self._entity_scope = None
self._creation_date = None
self._assoc_egress_qos_id = None
self._queue1_associated_rate_limiter_id = None
self._queue1_forwarding_classes = None
self._queue2_associated_rate_limiter_id = None
self._queue2_forwarding_classes = None
self._queue3_associated_rate_limiter_id = None
self._queue3_forwarding_classes = None
self._queue4_associated_rate_limiter_id = None
self._queue4_forwarding_classes = None
self._custom_spq_depth = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="parent_queue_associated_rate_limiter_id", remote_name="parentQueueAssociatedRateLimiterID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="assoc_egress_qos_id", remote_name="assocEgressQosId", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="queue1_associated_rate_limiter_id", remote_name="queue1AssociatedRateLimiterID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="queue1_forwarding_classes", remote_name="queue1ForwardingClasses", attribute_type=list, is_required=False, is_unique=False, choices=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'NONE'])
self.expose_attribute(local_name="queue2_associated_rate_limiter_id", remote_name="queue2AssociatedRateLimiterID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="queue2_forwarding_classes", remote_name="queue2ForwardingClasses", attribute_type=list, is_required=False, is_unique=False, choices=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'NONE'])
self.expose_attribute(local_name="queue3_associated_rate_limiter_id", remote_name="queue3AssociatedRateLimiterID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="queue3_forwarding_classes", remote_name="queue3ForwardingClasses", attribute_type=list, is_required=False, is_unique=False, choices=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'NONE'])
self.expose_attribute(local_name="queue4_associated_rate_limiter_id", remote_name="queue4AssociatedRateLimiterID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="queue4_forwarding_classes", remote_name="queue4ForwardingClasses", attribute_type=list, is_required=False, is_unique=False, choices=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'NONE'])
self.expose_attribute(local_name="custom_spq_depth", remote_name="customSpqDepth", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A unique name of the QoS object
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name of the QoS object
"""
self._name = value
@property
def parent_queue_associated_rate_limiter_id(self):
""" Get parent_queue_associated_rate_limiter_id value.
Notes:
ID of the parent rate limiter associated with this Ingress QOS policy.
This attribute is named `parentQueueAssociatedRateLimiterID` in VSD API.
"""
return self._parent_queue_associated_rate_limiter_id
@parent_queue_associated_rate_limiter_id.setter
def parent_queue_associated_rate_limiter_id(self, value):
""" Set parent_queue_associated_rate_limiter_id value.
Notes:
ID of the parent rate limiter associated with this Ingress QOS policy.
This attribute is named `parentQueueAssociatedRateLimiterID` in VSD API.
"""
self._parent_queue_associated_rate_limiter_id = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def description(self):
""" Get description value.
Notes:
A description of the QoS object
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the QoS object
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specifies whether the scope of the entity is Data center or Enterprise level.
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specifies whether the scope of the entity is Data center or Enterprise level.
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def assoc_egress_qos_id(self):
""" Get assoc_egress_qos_id value.
Notes:
ID of object associated with this QoS object
This attribute is named `assocEgressQosId` in VSD API.
"""
return self._assoc_egress_qos_id
@assoc_egress_qos_id.setter
def assoc_egress_qos_id(self, value):
""" Set assoc_egress_qos_id value.
Notes:
ID of object associated with this QoS object
This attribute is named `assocEgressQosId` in VSD API.
"""
self._assoc_egress_qos_id = value
@property
def queue1_associated_rate_limiter_id(self):
""" Get queue1_associated_rate_limiter_id value.
Notes:
ID of the queue1 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue1AssociatedRateLimiterID` in VSD API.
"""
return self._queue1_associated_rate_limiter_id
@queue1_associated_rate_limiter_id.setter
def queue1_associated_rate_limiter_id(self, value):
""" Set queue1_associated_rate_limiter_id value.
Notes:
ID of the queue1 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue1AssociatedRateLimiterID` in VSD API.
"""
self._queue1_associated_rate_limiter_id = value
@property
def queue1_forwarding_classes(self):
""" Get queue1_forwarding_classes value.
Notes:
Queue1 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue1ForwardingClasses` in VSD API.
"""
return self._queue1_forwarding_classes
@queue1_forwarding_classes.setter
def queue1_forwarding_classes(self, value):
""" Set queue1_forwarding_classes value.
Notes:
Queue1 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue1ForwardingClasses` in VSD API.
"""
self._queue1_forwarding_classes = value
@property
def queue2_associated_rate_limiter_id(self):
""" Get queue2_associated_rate_limiter_id value.
Notes:
ID of the queue2 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue2AssociatedRateLimiterID` in VSD API.
"""
return self._queue2_associated_rate_limiter_id
@queue2_associated_rate_limiter_id.setter
def queue2_associated_rate_limiter_id(self, value):
""" Set queue2_associated_rate_limiter_id value.
Notes:
ID of the queue2 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue2AssociatedRateLimiterID` in VSD API.
"""
self._queue2_associated_rate_limiter_id = value
@property
def queue2_forwarding_classes(self):
""" Get queue2_forwarding_classes value.
Notes:
Queue2 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue2ForwardingClasses` in VSD API.
"""
return self._queue2_forwarding_classes
@queue2_forwarding_classes.setter
def queue2_forwarding_classes(self, value):
""" Set queue2_forwarding_classes value.
Notes:
Queue2 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue2ForwardingClasses` in VSD API.
"""
self._queue2_forwarding_classes = value
@property
def queue3_associated_rate_limiter_id(self):
""" Get queue3_associated_rate_limiter_id value.
Notes:
ID of the queue3 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue3AssociatedRateLimiterID` in VSD API.
"""
return self._queue3_associated_rate_limiter_id
@queue3_associated_rate_limiter_id.setter
def queue3_associated_rate_limiter_id(self, value):
""" Set queue3_associated_rate_limiter_id value.
Notes:
ID of the queue3 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue3AssociatedRateLimiterID` in VSD API.
"""
self._queue3_associated_rate_limiter_id = value
@property
def queue3_forwarding_classes(self):
""" Get queue3_forwarding_classes value.
Notes:
Queue3 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue3ForwardingClasses` in VSD API.
"""
return self._queue3_forwarding_classes
@queue3_forwarding_classes.setter
def queue3_forwarding_classes(self, value):
""" Set queue3_forwarding_classes value.
Notes:
Queue3 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue3ForwardingClasses` in VSD API.
"""
self._queue3_forwarding_classes = value
@property
def queue4_associated_rate_limiter_id(self):
""" Get queue4_associated_rate_limiter_id value.
Notes:
ID of the queue4 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue4AssociatedRateLimiterID` in VSD API.
"""
return self._queue4_associated_rate_limiter_id
@queue4_associated_rate_limiter_id.setter
def queue4_associated_rate_limiter_id(self, value):
""" Set queue4_associated_rate_limiter_id value.
Notes:
ID of the queue4 rate limiter associated with this Ingress QOS policy.
This attribute is named `queue4AssociatedRateLimiterID` in VSD API.
"""
self._queue4_associated_rate_limiter_id = value
@property
def queue4_forwarding_classes(self):
""" Get queue4_forwarding_classes value.
Notes:
Queue4 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue4ForwardingClasses` in VSD API.
"""
return self._queue4_forwarding_classes
@queue4_forwarding_classes.setter
def queue4_forwarding_classes(self, value):
""" Set queue4_forwarding_classes value.
Notes:
Queue4 Forwarding Classes for this Ingress QOS Policy. Possible values are NONE, A, B, C, D, E, F, G, H.
This attribute is named `queue4ForwardingClasses` in VSD API.
"""
self._queue4_forwarding_classes = value
@property
def custom_spq_depth(self):
""" Get custom_spq_depth value.
Notes:
Custom Depth of the Strict Priority Queue (Queue1). Measured as 'Number of Packets'. A value of zero indicates it is 'not set'. Valid values are in range 32 to 512.
This attribute is named `customSpqDepth` in VSD API.
"""
return self._custom_spq_depth
@custom_spq_depth.setter
def custom_spq_depth(self, value):
""" Set custom_spq_depth value.
Notes:
Custom Depth of the Strict Priority Queue (Queue1). Measured as 'Number of Packets'. A value of zero indicates it is 'not set'. Valid values are in range 32 to 512.
This attribute is named `customSpqDepth` in VSD API.
"""
self._custom_spq_depth = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
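# --- Usage sketch (illustrative, not part of the generated bambou class above) ---
# A minimal example of how the attributes exposed above might be populated,
# assuming `qos` is an instance of the QOS policy class defined in this file.
# Object creation and persistence (parent entity, session, save()) belong to the
# wider vspk/bambou API and are assumed here rather than shown.
#
#   qos.name = "branch-office-egress"                        # required attribute
#   qos.queue1_forwarding_classes = ["A", "B"]               # values must come from the declared choices
#   qos.queue4_associated_rate_limiter_id = rate_limiter_id  # hypothetical ID variable
#   qos.custom_spq_depth = 64                                # valid range per the docstring: 32-512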
|
|
import datetime
import pytz
from juliabox.cloud import Compute
from juliabox.jbox_tasks import JBoxAsyncJob
from juliabox.jbox_util import JBoxCfg
from juliabox.jbox_container import BaseContainer
from juliabox.vol import VolMgr, JBoxVol
import docker.utils
from docker.utils import Ulimit
class SessContainer(BaseContainer):
PINGS = {}
DCKR_IMAGE = None
MEM_LIMIT = None
ULIMITS = None
# By default all groups have 1024 shares.
# A group with 100 shares will get a ~10% portion of the CPU time (https://wiki.archlinux.org/index.php/Cgroups)
CPU_LIMIT = 1024
PORTS_INTERNAL = [4200, 8000, 8998]
PORTS_USER = range(8050, 8053)
PORTS = PORTS_INTERNAL + PORTS_USER
VOLUMES = ['/home/juser', JBoxVol.PKG_MOUNT_POINT, JBoxVol.POLSAR_MOUNT_POINT]
MAX_CONTAINERS = 0
VALID_CONTAINERS = {}
INITIAL_DISK_USED_PCT = None
LAST_CPU_PCT = None
def get_host_ports(self):
if self.host_ports is None:
self.host_ports = self._get_host_ports(SessContainer.PORTS_INTERNAL)
return self.host_ports
def get_disk_allocated(self):
disk = VolMgr.get_disk_from_container(self.dockid, JBoxVol.JBP_USERHOME)
if disk is not None:
return disk.get_disk_allocated_size()
return 0
@staticmethod
def configure():
BaseContainer.DCKR = JBoxCfg.dckr
SessContainer.DCKR_IMAGE = JBoxCfg.get('interactive.docker_image')
SessContainer.MEM_LIMIT = JBoxCfg.get('interactive.mem_limit')
SessContainer.ULIMITS = []
limits = JBoxCfg.get('interactive.ulimits')
for (n, v) in limits.iteritems():
SessContainer.ULIMITS.append(Ulimit(name=n, soft=v, hard=v))
SessContainer.CPU_LIMIT = JBoxCfg.get('interactive.cpu_limit')
SessContainer.MAX_CONTAINERS = JBoxCfg.get('interactive.numlocalmax')
@staticmethod
def _create_new(name, email):
home_disk = VolMgr.get_disk_for_user(email)
pkgs_disk = VolMgr.get_pkg_mount_for_user(email)
polsar_disk = VolMgr.get_polsar_mount_for_user(email)
vols = {
home_disk.disk_path: {
'bind': SessContainer.VOLUMES[0],
'ro': False
},
pkgs_disk.disk_path: {
'bind': SessContainer.VOLUMES[1],
'ro': True
},
polsar_disk.disk_path: {
'bind': SessContainer.VOLUMES[2],
'ro': True
}
}
port_bindings = {p: ('127.0.0.1',) for p in SessContainer.PORTS}
hostcfg = docker.utils.create_host_config(binds=vols,
port_bindings=port_bindings,
mem_limit=SessContainer.MEM_LIMIT,
ulimits=SessContainer.ULIMITS)
jsonobj = BaseContainer.DCKR.create_container(SessContainer.DCKR_IMAGE,
detach=True,
host_config=hostcfg,
cpu_shares=SessContainer.CPU_LIMIT,
ports=SessContainer.PORTS,
volumes=SessContainer.VOLUMES,
hostname='juliabox',
name=name)
dockid = jsonobj["Id"]
cont = SessContainer(dockid)
SessContainer.log_info("Created %s with hostcfg %r, cpu_limit: %r, volumes: %r", cont.debug_str(), hostcfg,
SessContainer.CPU_LIMIT, vols)
return cont
@staticmethod
def invalidate_container(cname):
if not cname.startswith("/"):
cname = "/" + cname
if cname in SessContainer.VALID_CONTAINERS:
SessContainer.log_info("Invalidating container %s", cname)
del SessContainer.VALID_CONTAINERS[cname]
@staticmethod
def launch_by_name(name, email, reuse=True):
SessContainer.log_info("Launching container %s", name)
cont = SessContainer.get_by_name(name)
if (cont is not None) and not reuse:
cont.delete()
cont = None
if cont is None:
cont = SessContainer._create_new(name, email)
try:
if not (cont.is_running() or cont.is_restarting()):
cont.start()
#else:
# cont.restart()
except:
cont.delete()
raise
return cont
@staticmethod
def maintain(max_timeout=0, inactive_timeout=0):
SessContainer.log_info("Starting container maintenance...")
tnow = datetime.datetime.now(pytz.utc)
tmin = datetime.datetime(datetime.MINYEAR, 1, 1, tzinfo=pytz.utc)
stop_before = (tnow - datetime.timedelta(seconds=max_timeout)) if (max_timeout > 0) else tmin
stop_inactive_before = (tnow - datetime.timedelta(seconds=inactive_timeout)) if (inactive_timeout > 0) else tmin
all_containers = BaseContainer.session_containers(allcontainers=True)
all_cnames = {}
container_id_list = []
for cdesc in all_containers:
cid = cdesc['Id']
cont = SessContainer(cid)
container_id_list.append(cid)
cname = cont.get_name()
if cname is None:
SessContainer.log_debug("Ignoring %s", cont.debug_str())
continue
all_cnames[cname] = cid
c_is_active = cont.is_running() or cont.is_restarting()
last_ping = SessContainer._get_last_ping(cname)
# if we don't have a ping record, create one (we must have restarted)
if (last_ping is None) and c_is_active:
SessContainer.log_info("Discovered new container %s", cont.debug_str())
SessContainer.record_ping(cname)
start_time = cont.time_started()
# check that start time is not absurdly small (indicates a container that's starting up)
start_time_not_zero = (tnow-start_time).total_seconds() < (365*24*60*60)
if (start_time < stop_before) and start_time_not_zero:
# don't allow running beyond the limit for long running sessions
# SessContainer.log_info("time_started " + str(cont.time_started()) +
# " delete_before: " + str(delete_before) +
# " cond: " + str(cont.time_started() < delete_before))
SessContainer.log_warn("Running beyond allowed time %s. Scheduling cleanup.", cont.debug_str())
SessContainer.invalidate_container(cont.get_name())
JBoxAsyncJob.async_backup_and_cleanup(cont.dockid)
elif (last_ping is not None) and c_is_active and (last_ping < stop_inactive_before):
# if inactive for too long, stop it
# SessContainer.log_info("last_ping " + str(last_ping) + " stop_before: " + str(stop_before) +
# " cond: " + str(last_ping < stop_before))
SessContainer.log_warn("Inactive beyond allowed time %s. Scheduling cleanup.", cont.debug_str())
SessContainer.invalidate_container(cont.get_name())
JBoxAsyncJob.async_backup_and_cleanup(cont.dockid)
# delete ping entries for nonexistent containers
for cname in list(SessContainer.PINGS.keys()):
if cname not in all_cnames:
del SessContainer.PINGS[cname]
SessContainer.VALID_CONTAINERS = all_cnames
VolMgr.refresh_disk_use_status(container_id_list=container_id_list)
SessContainer.log_info("Finished container maintenance.")
@staticmethod
def is_valid_container(cname, hostports):
cont = None
if cname in SessContainer.VALID_CONTAINERS:
try:
cont = SessContainer(SessContainer.VALID_CONTAINERS[cname])
except:
pass
else:
all_containers = SessContainer.session_containers(allcontainers=True)
for cdesc in all_containers:
cid = cdesc['Id']
cont = SessContainer(cid)
cont_name = cont.get_name()
SessContainer.VALID_CONTAINERS[cont_name] = cid
if cname == cont_name:
break
if cont is None:
return False
try:
return hostports == cont.get_host_ports()
except:
return False
@staticmethod
def get_active_sessions():
instances = Compute.get_all_instances()
active_sessions = set()
for inst in instances:
try:
sessions = JBoxAsyncJob.sync_session_status(inst)['data']
if len(sessions) > 0:
for sess_id in sessions.keys():
active_sessions.add(sess_id)
except:
SessContainer.log_error("Error receiving sessions list from %r", inst)
return active_sessions
def backup_and_cleanup(self):
self.stop()
self.delete(backup=True)
# def backup(self):
# SessContainer.log_info("Backing up %s", self.debug_str())
# disk = VolMgr.get_disk_from_container(self.dockid)
# if disk is not None:
# disk.backup()
@staticmethod
def get_by_name(name):
if not name.startswith("/"):
nname = "/" + unicode(name)
else:
nname = unicode(name)
for c in SessContainer.session_containers(allcontainers=True):
if ('Names' in c) and (c['Names'] is not None) and (c['Names'][0] == nname):
return SessContainer(c['Id'])
return None
@staticmethod
def record_ping(name):
SessContainer.PINGS[name] = datetime.datetime.now(pytz.utc)
# log_info("Recorded ping for %s", name)
@staticmethod
def _get_last_ping(name):
return SessContainer.PINGS[name] if (name in SessContainer.PINGS) else None
def on_stop(self):
self.record_usage()
def on_start(self):
cname = self.get_name()
if cname is not None:
SessContainer.record_ping(cname)
def on_restart(self):
self.on_start()
def on_kill(self):
self.on_stop()
def before_delete(self, cname, backup):
for disktype in (JBoxVol.JBP_USERHOME, JBoxVol.JBP_PKGBUNDLE, JBoxVol.JBP_DATA, JBoxVol.JBP_POLSAR):
disk = VolMgr.get_disk_from_container(self.dockid, disktype)
if disk is not None:
disk.release(backup=backup)
if cname is not None:
SessContainer.PINGS.pop(cname, None)
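# --- Usage sketch (illustrative; not invoked from within this class) ---
# A typical lifecycle, assuming JBoxCfg has been loaded with the 'interactive.*'
# keys read in configure() and that VolMgr/Compute are already set up. Names,
# emails and timeouts below are placeholders:
#
#   SessContainer.configure()
#   cont = SessContainer.launch_by_name("/sess_alice", "alice@example.com")
#   SessContainer.record_ping(cont.get_name())
#   # periodically, e.g. from a scheduled task:
#   SessContainer.maintain(max_timeout=8 * 60 * 60, inactive_timeout=30 * 60)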
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
This module implements reading and writing of ShengBTE CONTROL files.
"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from monty.dev import requires
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.io.vasp import Kpoints
try:
import f90nml
except ImportError:
f90nml = None
__author__ = "Rees Chang, Alex Ganose"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__email__ = "rc564@cornell.edu, aganose@lbl.gov"
__date__ = "June 27, 2019"
class Control(MSONable, dict):
"""
Class for reading, updating, and writing ShengBTE CONTROL files.
See https://bitbucket.org/sousaw/shengbte/src/master/ for more
detailed description and default values of CONTROL arguments.
"""
required_params = [
"nelements",
"natoms",
"ngrid",
"lattvec",
"types",
"elements",
"positions",
"scell",
]
allocations_keys = ["nelements", "natoms", "ngrid", "norientations"]
crystal_keys = [
"lfactor",
"lattvec",
"types",
"elements",
"positions",
"masses",
"gfactors",
"epsilon",
"born",
"scell",
"orientations",
]
params_keys = [
"t",
"t_min",
"t_max",
"t_step",
"omega_max",
"scalebroad",
"rmin",
"rmax",
"dr",
"maxiter",
"nticks",
"eps",
]
flags_keys = [
"nonanalytic",
"convergence",
"isotopes",
"autoisotopes",
"nanowires",
"onlyharmonic",
"espresso",
]
def __init__(self, ngrid: Optional[List[int]] = None, temperature: Union[float, Dict[str, float]] = 300, **kwargs):
"""
Args:
ngrid: Reciprocal space grid density as a list of 3 ints.
temperature: The temperature to calculate the lattice thermal
conductivity for. Can be given as a single float, or a dictionary
with the keys "min", "max", "step".
**kwargs: Other ShengBTE parameters. Several parameters are required
for ShengBTE to run - we have listed these parameters below:
- nelements (int): number of different elements in the compound
- natoms (int): number of atoms in the unit cell
- lattvec (size 3x3 array): real-space lattice vectors, in units
of lfactor
- lfactor (float): unit of measurement for lattice vectors (nm).
I.e., set to 0.1 if lattvec given in Angstrom.
- types (size natom list): a vector of natom integers, ranging
from 1 to nelements, assigning an element to each atom in the
system
- elements (size natom list): a vector of element names
- positions (size natomx3 array): atomic positions in lattice
coordinates
- scell (size 3 list): supercell sizes along each crystal axis
used for the 2nd-order force constant calculation
"""
super().__init__()
if ngrid is None:
ngrid = [25, 25, 25]
self["ngrid"] = ngrid
if isinstance(temperature, (int, float)):
self["t"] = temperature
elif isinstance(temperature, dict):
self["t_min"] = temperature["min"]
self["t_max"] = temperature["max"]
self["t_step"] = temperature["step"]
else:
raise ValueError("Unsupported temperature type, must be float or dict")
self.update(kwargs)
@classmethod
@requires(
f90nml,
"ShengBTE Control object requires f90nml to be installed. " "Please get it at https://pypi.org/project/f90nml.",
)
def from_file(cls, filepath: str):
"""
Read a CONTROL namelist file and output a 'Control' object
Args:
filepath: Path of the CONTROL file.
Returns:
'Control' object with parameters instantiated.
"""
nml = f90nml.read(filepath)
sdict = nml.todict()
all_dict: Dict[str, Any] = {}
all_dict.update(sdict["allocations"])
all_dict.update(sdict["crystal"])
all_dict.update(sdict["parameters"])
all_dict.update(sdict["flags"])
all_dict.pop("_start_index") # remove unnecessary cruft
return cls.from_dict(all_dict)
@classmethod
def from_dict(cls, control_dict: Dict):
"""
Create a 'Control' object from a Python dictionary. Description and default
parameters can be found at
https://bitbucket.org/sousaw/shengbte/src/master/.
Note some parameters are mandatory. Optional parameters default here to
None and will not be written to file.
Args:
control_dict: A Python dictionary of ShengBTE input parameters.
"""
return cls(**control_dict)
@requires(
f90nml,
"ShengBTE Control object requires f90nml to be installed. " "Please get it at https://pypi.org/project/f90nml.",
)
def to_file(self, filename: str = "CONTROL"):
"""
Writes ShengBTE CONTROL file from 'Control' object
Args:
filename: A file name.
"""
for param in self.required_params:
if param not in self.as_dict():
warnings.warn("Required parameter '{}' not specified!".format(param))
alloc_dict = _get_subdict(self, self.allocations_keys)
alloc_nml = f90nml.Namelist({"allocations": alloc_dict})
control_str = str(alloc_nml) + "\n"
crystal_dict = _get_subdict(self, self.crystal_keys)
crystal_nml = f90nml.Namelist({"crystal": crystal_dict})
control_str += str(crystal_nml) + "\n"
params_dict = _get_subdict(self, self.params_keys)
params_nml = f90nml.Namelist({"parameters": params_dict})
control_str += str(params_nml) + "\n"
flags_dict = _get_subdict(self, self.flags_keys)
flags_nml = f90nml.Namelist({"flags": flags_dict})
control_str += str(flags_nml) + "\n"
with open(filename, "w") as file:
file.write(control_str)
@classmethod
def from_structure(cls, structure: Structure, reciprocal_density: Optional[int] = 50000, **kwargs):
"""
Get a ShengBTE control object from a structure.
Args:
structure: A structure object.
reciprocal_density: If not None, the q-point grid ("ngrid") will be
set using this density.
kwargs: Additional options to be passed to the Control constructor.
See the docstring of the __init__ method for more details
Returns:
A ShengBTE control object.
"""
elements = list(map(str, structure.composition.elements))
unique_nums = np.unique(structure.atomic_numbers)
types_dict = dict(zip(unique_nums, range(len(unique_nums))))
types = [types_dict[i] + 1 for i in structure.atomic_numbers]
control_dict = {
"nelements": structure.ntypesp,
"natoms": structure.num_sites,
"norientations": 0,
"lfactor": 0.1,
"lattvec": structure.lattice.matrix.tolist(),
"elements": elements,
"types": types,
"positions": structure.frac_coords.tolist(),
}
if reciprocal_density:
kpoints = Kpoints.automatic_density(structure, reciprocal_density)
control_dict["ngrid"] = kpoints.kpts[0]
control_dict.update(**kwargs)
return Control(**control_dict)
def get_structure(self) -> Structure:
"""
Get a pymatgen Structure from a ShengBTE control object.
The control object must have the "lattvec", "types", "elements", and
"positions" settings otherwise an error will be thrown.
Returns:
The structure.
"""
required = ["lattvec", "types", "elements", "positions"]
if not all(r in self for r in required):
raise ValueError("All of ['lattvec', 'types', 'elements', 'positions'] must be " "in control object")
unique_elements = self["elements"]
n_unique_elements = len(unique_elements)
element_map = dict(zip(range(1, n_unique_elements + 1), unique_elements))
species = [element_map[i] for i in self["types"]]
cell = np.array(self["lattvec"])
if "lfactor" in self:
cell *= self["lfactor"] * 10 # to nm then to Angstrom
return Structure(cell, species, self["positions"])
def as_dict(self):
"""
Returns: MSONable dict
"""
return dict(self)
def _get_subdict(master_dict, subkeys):
"""Helper method to get a set of keys from a larger dictionary"""
return {k: master_dict[k] for k in subkeys if k in master_dict and master_dict[k] is not None}
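# --- Usage sketch (illustrative) ---
# A minimal round trip using the API defined above. The structure file name,
# temperature and supercell values are placeholders, not defaults of this
# module, and writing/reading requires f90nml to be installed.
#
#   structure = Structure.from_file("POSCAR")   # hypothetical input file
#   control = Control.from_structure(structure, reciprocal_density=50000,
#                                    t=300, scell=[3, 3, 3])
#   control.to_file("CONTROL")
#   reread = Control.from_file("CONTROL")
#   assert reread["ngrid"] == control["ngrid"]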
|
|
#-*- coding: utf-8 -*-
#import wxversion
#wxversion.select('2.8')
import wx
import os
import re
import json
import sys
import string
import sqlite3
import datetime
#import psycopg2
import wx.grid as gridlib
import wx.lib.masked as masked
import Printer as PRx
import HandyUtilities as HU
import Common_Dialogs as CDialog
from decimal import Decimal, ROUND_HALF_UP, ROUND_UP, ROUND_05UP
class FillIn(object):
def __init__(self,gridname,debug=True):
self.grid = wx.FindWindowByName(gridname)
HU.BeVerbose('Fill In ...')
def POSCustAcctGrid(self,custNum=None,debug=True):
HU.BeVerbose('... POS Cust Acct Grid')
grid = self.grid
acctInfo = HU.SetAcctInfo(grid.GetName())
if custNum is None:
set_list = [('Account Number','PLACEHOLDER')]
#grid.SetCellValue(0,0,'PLACEHOLDER')
HU.FillGrid(grid.GetName(),set_list, col=0)
return
fields = '''cust_num, first_name, last_name,
address_acct_num, phone_numbers'''
returnd = HU.LookupDB('customer_basic_info').Mode2(custNum,'cust_num',fields)
HU.Debugger(returnd,True)
(cust_numd,first_named, last_named, address_acct_numd,phoned_JSON) = returnd
fields = '''street_num, street_direction, street_name,
street_type,unit,city,zipcode,state,address2'''
returnd = HU.LookupDB('address_accounts').Mode2(address_acct_numd,'addr_acct_num',fields)
HU.Debugger('Returnd : {} = {}'.format(returnd, len(returnd)),debug)
(street_numd,street_directiond,street_named,street_typed, unitd,cityd,zipcoded,stated,addressed2) = returnd
print('flname : {} {}'.format(first_named, last_named))
acctInfo.custAcctNum(cust_numd)
acctInfo.name(first_named, last_named)
#flname = '{0} {1}'.format(first_named, last_named)
addressed = HU.AllinaRow(street_numd,street_directiond,street_named,street_typed,unit=unitd)
acctInfo.address(addressed)
acctInfo.cistzi(cityd, stated, zipcoded)
fields = 'fixed_discount, discount_amt'
returnd = HU.LookupDB('customer_sales_options').Mode2(custNum,'cust_num',fields)
fixed_discountd, discount_amtd = '0', None
(fixed_discountd, discount_amtd) = returnd
print('fixed_discount : {} {}'.format(fixed_discountd, discount_amtd))
acctInfo.discountd(fixed_discountd, discount_amtd)
grid = wx.FindWindowByName('pos_acct_grid')
rows = grid.GetNumberRows()
cols = grid.GetNumberCols()
phoned = json.loads(phoned_JSON)
print("Phoned : ",phoned)
phone_list = []
if phoned:
for key,value in phoned.items():
print("Key : {0}, Value : {1}".format(key, value))
whered = value[0].strip()
valued = re.sub('[()\ -]+', '', value[1])
sets = '{}{}{}-{}{}{}-{}{}{}{}'.format(*valued)
phone_list_tup = '{0} : {1}'.format(whered, sets)
phone_list.append(phone_list_tup)
print("Phone List : ",phone_list)
print("pHone List 2 : ",phone_list[0])
grid.SetCellValue(6,0,phone_list[0])
grid.SetCellEditor(6,0,HU.GridCellComboBox(phone_list))
def POSAddrAcctGrid(self, custNum, addrAcctNum=None, debug=True):
HU.BeVerbose('... POS Addr Acct Grid')
grid = self.grid
print('Customer Num : ',custNum)
if custNum == '' or custNum is None:
return
fields = '''cust_num,first_name,last_name,cust_num,
address_acct_num,phone_numbers,rental_of'''
returnd = HU.LookupDB('customer_basic_info').Mode2(custNum, 'cust_num',fields)
HU.Debugger(returnd,True)
(cust_numd,first_named, last_named, cust_numd,
address_acct_numd,phoned_JSON,rental_JSON) = returnd
if addrAcctNum is not None:
address_acct_numd = addrAcctNum
fields = '''street_num, street_direction, street_name,
street_type, unit, city, zipcode, state,
address2'''
returnd = HU.LookupDB('address_accounts').Mode2(address_acct_numd,'addr_acct_num',fields)
(street_numd,street_directiond,street_named,street_typed,unitd,
cityd,zipcoded,stated,addressed2) = returnd
rental_dict = {}
if rental_JSON is not None and len(rental_JSON) > 0 and addrAcctNum is None:
rental_list = json.loads(rental_JSON)
dict_cnt = len(rental_dict)
print('dict_cnt : ',dict_cnt)
print('address_acct_numd : ',address_acct_numd)
print('rental_dict : ',rental_dict)
rental_list.append(address_acct_numd)
addr_choice = []
for addr_num in rental_list:
fields = 'address0,city,state,unit'
returnd = HU.LookupDB('address_accounts').Mode2(addr_num,'addr_acct_num',fields)
(address0d, cityd,statd,unitd) = returnd
if unitd is not None:
main_addr = '{}\t{} UNIT {}, {}, {}'.format(addr_num, address0d, unitd, cityd, statd)
else:
main_addr = '{}\t{}, {}, {}'.format(addr_num, address0d, cityd, statd)
addr_choice.append(main_addr)
grid.SetCellValue(1,0,addr_choice[0])
grid.SetCellEditor(1,0,HU.GridCellComboBox(addr_choice))
style = (wx.DEFAULT_FRAME_STYLE | wx.CLOSE_BOX) & ~wx.MAXIMIZE_BOX
dlg = CDialog.AddressSelectionDialog(self,
title="Address Selection",
style=style, addrList=rental_list)
dlg.ShowModal()
try:
self.addrPicked = dlg.addrPicked.upper().strip()
except:
pass
dlg.Destroy()
print("addr _ Choice : {}".format(addr_choice))
regex = re.compile(self.addrPicked)
idxs = [i for i, item in enumerate(addr_choice) if re.search(regex, item)]
print('IDXS : ',idxs[0])
#choiceKey = addr_choice.index(self.addrPicked)
print('addr_choice[idxs[0]] : ',addr_choice[idxs[0]])
grid.SetCellValue(1,0,addr_choice[idxs[0]])
else:
address0d = HU.AllinaRow(street_numd, street_directiond,street_named, street_typed, unitd)
main_addr = '{}\t{}, {}, {}'.format(addrAcctNum,address0d,cityd,stated)
grid.SetCellValue(1,0,main_addr)
print("Address Account")
rowname = 'Address Account'
if rowname == 'Address Account':
if rowname:
print("ON CLICK ADDRESS ACCOUNT CHANGE")
newAccount = grid.GetCellValue(1,0)
print("New Account : ",newAccount)
p = re.search('A[0-9]+',newAccount)
if p is None:
return
addr_num = p.group(0)
fields = 'address0,city,state,zipcode,unit'
returnd = HU.LookupDB('address_accounts').Mode2(addr_num,'addr_acct_num',fields)
print('address change : {}'.format(returnd))
(address0d, cityd,statd,zipd,unitd) = returnd
acctInfo = HU.SetAcctInfo(grid.GetName())
cszd = '{}, {} {}'.format(cityd,statd,zipd)
set_list = [('Address 1',address0d),('City, State, Zip',cszd)]
acctInfo.address(address0d)
acctInfo.cistzi(cityd,statd,zipd)
#grid.SetCellValue(3,0,address0d)
#grid.SetCellValue(5,0,cszd)
readonly_list = ['Name','Address 1','Address 2',
'City, State, Zip','A/R/Avail Credit',
'Discount %','Ship To']
HU.GridListReadOnly(grid.GetName(),readonly_list)
print("DONE & DONE")
HU.GridFocusNGo('pos_transactions_grid',0)
def POSTransGrid(self, gridname, transId):
grid = wx.FindWindowByName(gridname)
query = 'SELECT upc,description,quantity,unit_price,discount,total_price,tax1,tax2,tax3,tax4,tax_never FROM transactions WHERE transaction_id = ?'
data = (transId,)
returnd = HU.SQConnect(query,data).ALL()
print('Returnd : ',returnd)
idx = 0
for upc, desc, qty, uprice, disc, totprice,tax1,tax2,tax3,tax4,tax5 in returnd:
taxd = [tax1, tax2, tax3, tax4, tax5]
isTaxed = 'Tx'
for tax in taxd:
if tax == 1:
isTaxed = 'nTx'
break
setList = [('Item Number',upc),('Description',desc),('Price',HU.RoundIt(uprice, '1.00')),
('Quantity',HU.RoundIt(qty,'1.00')),('Total',HU.RoundIt(totprice, '1.00')),('Disc',disc),
('Tx',isTaxed)]
HU.FillGrid(gridname, setList, row=idx)
idx += 1
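# --- Note on rounding (illustrative) ---
# HU.RoundIt is defined in HandyUtilities and is not shown in this file. Given
# the Decimal/ROUND_HALF_UP imports at the top of this module, a plausible
# sketch of the rounding applied to the 'Price'/'Quantity'/'Total' cells is:
#
#   def round_it(value, exponent='1.00'):
#       """Quantize to the given exponent (two decimals), rounding halves up."""
#       return Decimal(str(value)).quantize(Decimal(exponent), rounding=ROUND_HALF_UP)
#
# This is an assumption about HU.RoundIt's behaviour, not its actual implementation.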
|
|
import hashlib
import json
import os
import uuid
from django import forms
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase, TaggedItemBase
from wagtail.admin.edit_handlers import (
FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel,
TabbedInterface)
from wagtail.admin.forms import WagtailAdminPageForm
from wagtail.admin.mail import send_mail
from wagtail.contrib.forms.forms import FormBuilder
from wagtail.contrib.forms.models import (
FORM_FIELD_CHOICES, AbstractEmailForm, AbstractFormField, AbstractFormSubmission)
from wagtail.contrib.forms.views import SubmissionsListView
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.contrib.sitemaps import Sitemap
from wagtail.contrib.table_block.blocks import TableBlock
from wagtail.core.blocks import (
CharBlock, FieldBlock, RawHTMLBlock, RichTextBlock, StreamBlock, StructBlock)
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page, PageManager, PageQuerySet, Task, TranslatableMixin
from wagtail.documents import get_document_model
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.documents.models import AbstractDocument, Document
from wagtail.images import get_image_model
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import AbstractImage, AbstractRendition, Image
from wagtail.search import index
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.utils.decorators import cached_classmethod
from .forms import FormClassAdditionalFieldPageForm, ValidatedPageForm
EVENT_AUDIENCE_CHOICES = (
('public', "Public"),
('private', "Private"),
)
COMMON_PANELS = (
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('show_in_menus'),
FieldPanel('search_description'),
)
# Link fields
class LinkFields(models.Model):
link_external = models.URLField("External link", blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
link_document = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_document:
return self.link_document.url
else:
return self.link_external
panels = [
FieldPanel('link_external'),
PageChooserPanel('link_page'),
DocumentChooserPanel('link_document'),
]
class Meta:
abstract = True
# Carousel items
class CarouselItem(LinkFields):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
embed_url = models.URLField("Embed URL", blank=True)
caption = models.CharField(max_length=255, blank=True)
panels = [
ImageChooserPanel('image'),
FieldPanel('embed_url'),
FieldPanel('caption'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Related links
class RelatedLink(LinkFields):
title = models.CharField(max_length=255, help_text="Link title")
panels = [
FieldPanel('title'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Simple page
class SimplePage(Page):
content = models.TextField()
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('content'),
]
def get_admin_display_title(self):
return "%s (simple page)" % super().get_admin_display_title()
# Page with Excluded Fields when copied
class PageWithExcludedCopyField(Page):
content = models.TextField()
# Exclude this field from being copied
special_field = models.CharField(
blank=True, max_length=255, default='Very Special')
exclude_fields_in_copy = ['special_field']
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('special_field'),
FieldPanel('content'),
]
class PageWithOldStyleRouteMethod(Page):
"""
Prior to Wagtail 0.4, the route() method on Page returned an HttpResponse
rather than a Page instance. As subclasses of Page may override route,
we need to continue accepting this convention (albeit as a deprecated API).
"""
content = models.TextField()
template = 'tests/simple_page.html'
def route(self, request, path_components):
return self.serve(request)
# File page
class FilePage(Page):
file_field = models.FileField()
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('file_field'),
]
# Event page
class EventPageCarouselItem(TranslatableMixin, Orderable, CarouselItem):
page = ParentalKey('tests.EventPage', related_name='carousel_items', on_delete=models.CASCADE)
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
class EventPageRelatedLink(TranslatableMixin, Orderable, RelatedLink):
page = ParentalKey('tests.EventPage', related_name='related_links', on_delete=models.CASCADE)
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
class EventPageSpeakerAward(TranslatableMixin, Orderable, models.Model):
speaker = ParentalKey('tests.EventPageSpeaker', related_name='awards', on_delete=models.CASCADE)
name = models.CharField("Award name", max_length=255)
date_awarded = models.DateField(null=True, blank=True)
panels = [
FieldPanel('name'),
FieldPanel('date_awarded'),
]
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
class EventPageSpeaker(TranslatableMixin, Orderable, LinkFields, ClusterableModel):
page = ParentalKey('tests.EventPage', related_name='speakers', related_query_name='speaker', on_delete=models.CASCADE)
first_name = models.CharField("Name", max_length=255, blank=True)
last_name = models.CharField("Surname", max_length=255, blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def name_display(self):
return self.first_name + " " + self.last_name
panels = [
FieldPanel('first_name'),
FieldPanel('last_name'),
ImageChooserPanel('image'),
MultiFieldPanel(LinkFields.panels, "Link"),
InlinePanel('awards', label="Awards"),
]
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
class EventCategory(TranslatableMixin, models.Model):
name = models.CharField("Name", max_length=255)
def __str__(self):
return self.name
# Override the standard WagtailAdminPageForm to add validation on start/end dates
# that appears as a non-field error
class EventPageForm(WagtailAdminPageForm):
def clean(self):
cleaned_data = super().clean()
# Make sure that the event starts before it ends
start_date = cleaned_data['date_from']
end_date = cleaned_data['date_to']
if start_date and end_date and start_date > end_date:
raise ValidationError('The end date must be after the start date')
return cleaned_data
class EventPage(Page):
date_from = models.DateField("Start date", null=True)
date_to = models.DateField(
"End date",
null=True,
blank=True,
help_text="Not required if event is on a single day"
)
time_from = models.TimeField("Start time", null=True, blank=True)
time_to = models.TimeField("End time", null=True, blank=True)
audience = models.CharField(max_length=255, choices=EVENT_AUDIENCE_CHOICES)
location = models.CharField(max_length=255)
body = RichTextField(blank=True)
cost = models.CharField(max_length=255)
signup_link = models.URLField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
categories = ParentalManyToManyField(EventCategory, blank=True)
search_fields = [
index.SearchField('get_audience_display'),
index.SearchField('location'),
index.SearchField('body'),
index.FilterField('url_path'),
]
password_required_template = 'tests/event_page_password_required.html'
base_form_class = EventPageForm
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('date_from'),
FieldPanel('date_to'),
FieldPanel('time_from'),
FieldPanel('time_to'),
FieldPanel('location'),
FieldPanel('audience'),
FieldPanel('cost'),
FieldPanel('signup_link'),
InlinePanel('carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel('speakers', label="Speakers", heading="Speaker lineup"),
InlinePanel('related_links', label="Related links"),
FieldPanel('categories'),
# InlinePanel related model uses `pk` not `id`
InlinePanel('head_counts', label='Head Counts'),
]
promote_panels = [
MultiFieldPanel(COMMON_PANELS, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
class HeadCountRelatedModelUsingPK(models.Model):
"""Related model that uses a custom primary key (pk) not id"""
custom_id = models.AutoField(primary_key=True)
event_page = ParentalKey(
EventPage,
on_delete=models.CASCADE,
related_name='head_counts'
)
head_count = models.IntegerField()
panels = [FieldPanel('head_count')]
# Override the standard WagtailAdminPageForm to add field that is not in model
# so that we can test additional potential issues like comparing versions
class FormClassAdditionalFieldPage(Page):
location = models.CharField(max_length=255)
body = RichTextField(blank=True)
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('location'),
FieldPanel('body'),
FieldPanel('code'), # not in model, see set base_form_class
]
base_form_class = FormClassAdditionalFieldPageForm
# Just to be able to test multi table inheritance
class SingleEventPage(EventPage):
excerpt = models.TextField(
max_length=255,
blank=True,
null=True,
help_text="Short text to describe what is this action about"
)
# Give this page model a custom URL routing scheme
def get_url_parts(self, request=None):
url_parts = super().get_url_parts(request=request)
if url_parts is None:
return None
else:
site_id, root_url, page_path = url_parts
return (site_id, root_url, page_path + 'pointless-suffix/')
def route(self, request, path_components):
if path_components == ['pointless-suffix']:
# treat this as equivalent to a request for this page
return super().route(request, [])
else:
# fall back to default routing rules
return super().route(request, path_components)
def get_admin_display_title(self):
return "%s (single event)" % super().get_admin_display_title()
content_panels = [FieldPanel('excerpt')] + EventPage.content_panels
# "custom" sitemap object
class EventSitemap(Sitemap):
pass
# Event index (has a separate AJAX template, and a custom template context)
class EventIndex(Page):
intro = RichTextField(blank=True)
ajax_template = 'tests/includes/event_listing.html'
def get_events(self):
return self.get_children().live().type(EventPage)
def get_paginator(self):
return Paginator(self.get_events(), 4)
def get_context(self, request, page=1):
# Pagination
paginator = self.get_paginator()
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
# Update context
context = super().get_context(request)
context['events'] = events
return context
def route(self, request, path_components):
if self.live and len(path_components) == 1:
try:
return self.serve(request, page=int(path_components[0]))
except (TypeError, ValueError):
pass
return super().route(request, path_components)
def get_static_site_paths(self):
# Get page count
page_count = self.get_paginator().num_pages
# Yield a path for each page
for page in range(page_count):
yield '/%d/' % (page + 1)
# Yield from superclass
for path in super().get_static_site_paths():
yield path
def get_sitemap_urls(self, request=None):
# Add past events url to sitemap
return super().get_sitemap_urls(request=request) + [
{
'location': self.full_url + 'past/',
'lastmod': self.latest_revision_created_at
}
]
def get_cached_paths(self):
return super().get_cached_paths() + [
'/past/'
]
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
]
class FormField(AbstractFormField):
page = ParentalKey('FormPage', related_name='form_fields', on_delete=models.CASCADE)
class FormPage(AbstractEmailForm):
def get_context(self, request):
context = super().get_context(request)
context['greeting'] = "hello world"
return context
# This is redundant (SubmissionsListView is the default view class), but importing
# SubmissionsListView in this models.py helps us to confirm that this recipe
# https://docs.wagtail.org/en/stable/reference/contrib/forms/customisation.html#customise-form-submissions-listing-in-wagtail-admin
# works without triggering circular dependency issues -
# see https://github.com/wagtail/wagtail/issues/6265
submissions_list_view_class = SubmissionsListView
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# FormPage with a non-HTML extension
class JadeFormField(AbstractFormField):
page = ParentalKey('JadeFormPage', related_name='form_fields', on_delete=models.CASCADE)
class JadeFormPage(AbstractEmailForm):
template = "tests/form_page.jade"
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# Form page that redirects to a different page
class RedirectFormField(AbstractFormField):
page = ParentalKey('FormPageWithRedirect', related_name='form_fields', on_delete=models.CASCADE)
class FormPageWithRedirect(AbstractEmailForm):
thank_you_redirect_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
def get_context(self, request):
context = super(FormPageWithRedirect, self).get_context(request)
context['greeting'] = "hello world"
return context
def render_landing_page(self, request, form_submission=None, *args, **kwargs):
"""
Renders the landing page, or redirects to the thank_you_redirect_page if one is chosen.
"""
if self.thank_you_redirect_page:
return redirect(self.thank_you_redirect_page.url, permanent=False)
return super(FormPageWithRedirect, self).render_landing_page(request, form_submission, *args, **kwargs)
content_panels = [
FieldPanel('title', classname="full title"),
PageChooserPanel('thank_you_redirect_page'),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# FormPage with a custom FormSubmission
class FormPageWithCustomSubmission(AbstractEmailForm):
"""
This Form page:
* Has a custom submission model
* Has a custom related_name (see `FormFieldWithCustomSubmission.page`)
* Saves a reference to the user
* Doesn't render the HTML form if a submission for the current user is present
"""
intro = RichTextField(blank=True)
thank_you_text = RichTextField(blank=True)
def get_context(self, request, *args, **kwargs):
context = super().get_context(request)
context['greeting'] = "hello world"
return context
def get_form_fields(self):
return self.custom_form_fields.all()
def get_data_fields(self):
data_fields = [
('useremail', 'User email'),
]
data_fields += super().get_data_fields()
return data_fields
def get_submission_class(self):
return CustomFormPageSubmission
def process_form_submission(self, form):
form_submission = self.get_submission_class().objects.create(
form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),
page=self, user=form.user
)
if self.to_address:
addresses = [x.strip() for x in self.to_address.split(',')]
content = '\n'.join([x[1].label + ': ' + str(form.data.get(x[0])) for x in form.fields.items()])
send_mail(self.subject, content, addresses, self.from_address,)
# process_form_submission should now return the created form_submission
return form_submission
def serve(self, request, *args, **kwargs):
if self.get_submission_class().objects.filter(page=self, user__pk=request.user.pk).exists():
return TemplateResponse(
request,
self.template,
self.get_context(request)
)
return super().serve(request, *args, **kwargs)
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
InlinePanel('custom_form_fields', label="Form fields"),
FieldPanel('thank_you_text', classname="full"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
class FormFieldWithCustomSubmission(AbstractFormField):
page = ParentalKey(FormPageWithCustomSubmission, on_delete=models.CASCADE, related_name='custom_form_fields')
class CustomFormPageSubmission(AbstractFormSubmission):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
def get_data(self):
form_data = super().get_data()
form_data.update({
'useremail': self.user.email,
})
return form_data
# Custom form page with custom submission listing view and form submission
class FormFieldForCustomListViewPage(AbstractFormField):
page = ParentalKey(
'FormPageWithCustomSubmissionListView',
related_name='form_fields',
on_delete=models.CASCADE
)
class FormPageWithCustomSubmissionListView(AbstractEmailForm):
"""Form Page with customised submissions listing view"""
intro = RichTextField(blank=True)
thank_you_text = RichTextField(blank=True)
def get_submissions_list_view_class(self):
from .views import CustomSubmissionsListView
return CustomSubmissionsListView
def get_submission_class(self):
return CustomFormPageSubmission
def get_data_fields(self):
data_fields = [
('useremail', 'User email'),
]
data_fields += super().get_data_fields()
return data_fields
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
InlinePanel('form_fields', label="Form fields"),
FieldPanel('thank_you_text', classname="full"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# FormPage with custom FormBuilder
EXTENDED_CHOICES = FORM_FIELD_CHOICES + (('ipaddress', 'IP Address'),)
class ExtendedFormField(AbstractFormField):
"""Override the field_type field with extended choices."""
page = ParentalKey(
'FormPageWithCustomFormBuilder',
related_name='form_fields',
on_delete=models.CASCADE)
field_type = models.CharField(
verbose_name='field type', max_length=16, choices=EXTENDED_CHOICES)
class CustomFormBuilder(FormBuilder):
"""
A custom FormBuilder that has an 'ipaddress' field with
customised create_singleline_field with shorter max_length
"""
def create_singleline_field(self, field, options):
options['max_length'] = 120 # usual default is 255
return forms.CharField(**options)
def create_ipaddress_field(self, field, options):
return forms.GenericIPAddressField(**options)
class FormPageWithCustomFormBuilder(AbstractEmailForm):
"""
A Form page that has a custom form builder and uses a custom
form field model with additional field_type choices.
"""
form_builder = CustomFormBuilder
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
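# --- Usage sketch (illustrative) ---
# Wagtail's FormBuilder dispatches on field_type, so an ExtendedFormField with
# field_type='ipaddress' is built through create_ipaddress_field() above, and
# single-line fields pick up the shorter max_length. Roughly (the page object
# is a placeholder):
#
#   fields = form_page.get_form_fields()                      # ExtendedFormField instances
#   form_class = CustomFormBuilder(fields).get_form_class()
#   # 'ipaddress' entries become forms.GenericIPAddressField;
#   # 'singleline' entries get max_length=120 instead of the usual 255.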
# Snippets
class AdvertPlacement(models.Model):
page = ParentalKey('wagtailcore.Page', related_name='advert_placements', on_delete=models.CASCADE)
advert = models.ForeignKey('tests.Advert', related_name='+', on_delete=models.CASCADE)
colour = models.CharField(max_length=255)
class AdvertTag(TaggedItemBase):
content_object = ParentalKey('Advert', related_name='tagged_items', on_delete=models.CASCADE)
class Advert(ClusterableModel):
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
tags = TaggableManager(through=AdvertTag, blank=True)
panels = [
FieldPanel('url'),
FieldPanel('text'),
FieldPanel('tags'),
]
def __str__(self):
return self.text
register_snippet(Advert)
class AdvertWithCustomPrimaryKey(ClusterableModel):
advert_id = models.CharField(max_length=255, primary_key=True)
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
panels = [
FieldPanel('url'),
FieldPanel('text'),
]
def __str__(self):
return self.text
register_snippet(AdvertWithCustomPrimaryKey)
class AdvertWithCustomUUIDPrimaryKey(ClusterableModel):
advert_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
panels = [
FieldPanel('url'),
FieldPanel('text'),
]
def __str__(self):
return self.text
register_snippet(AdvertWithCustomUUIDPrimaryKey)
class AdvertWithTabbedInterface(models.Model):
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
something_else = models.CharField(max_length=255)
advert_panels = [
FieldPanel('url'),
FieldPanel('text'),
]
other_panels = [
FieldPanel('something_else'),
]
edit_handler = TabbedInterface([
ObjectList(advert_panels, heading='Advert'),
ObjectList(other_panels, heading='Other'),
])
def __str__(self):
return self.text
class Meta:
ordering = ('text',)
register_snippet(AdvertWithTabbedInterface)
class StandardIndex(Page):
""" Index for the site """
parent_page_types = [Page]
# A custom panel setup where all Promote fields are placed in the Content tab instead;
# we use this to test that the 'promote' tab is left out of the output when empty
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('seo_title'),
FieldPanel('slug'),
InlinePanel('advert_placements', label="Adverts"),
]
promote_panels = []
class StandardChild(Page):
pass
# Test overriding edit_handler with a custom one
StandardChild.edit_handler = TabbedInterface([
ObjectList(StandardChild.content_panels, heading='Content'),
ObjectList(StandardChild.promote_panels, heading='Promote'),
ObjectList(StandardChild.settings_panels, heading='Settings', classname='settings'),
ObjectList([], heading='Dinosaurs'),
], base_form_class=WagtailAdminPageForm)
class BusinessIndex(Page):
""" Can be placed anywhere, can only have Business children """
subpage_types = ['tests.BusinessChild', 'tests.BusinessSubIndex']
class BusinessSubIndex(Page):
""" Can be placed under BusinessIndex, and have BusinessChild children """
# BusinessNowherePage is 'incorrectly' added here as a possible child.
# The rules on BusinessNowherePage prevent it from being a child here though.
subpage_types = ['tests.BusinessChild', 'tests.BusinessNowherePage']
parent_page_types = ['tests.BusinessIndex', 'tests.BusinessChild']
class BusinessChild(Page):
""" Can only be placed under Business indexes, no children allowed """
subpage_types = []
parent_page_types = ['tests.BusinessIndex', BusinessSubIndex]
class BusinessNowherePage(Page):
""" Not allowed to be placed anywhere """
parent_page_types = []
class TaggedPageTag(TaggedItemBase):
content_object = ParentalKey('tests.TaggedPage', related_name='tagged_items', on_delete=models.CASCADE)
class TaggedPage(Page):
tags = ClusterTaggableManager(through=TaggedPageTag, blank=True)
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('tags'),
]
class TaggedChildPage(TaggedPage):
pass
class TaggedGrandchildPage(TaggedChildPage):
pass
class SingletonPage(Page):
@classmethod
def can_create_at(cls, parent):
# You can only create one of these!
return super(SingletonPage, cls).can_create_at(parent) \
and not cls.objects.exists()
class SingletonPageViaMaxCount(Page):
max_count = 1
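# Two ways of capping a page type at a single instance are modelled above: overriding
# ``can_create_at`` (SingletonPage) versus setting ``max_count`` (SingletonPageViaMaxCount).
# Roughly, once one instance of either type exists (``parent`` is any valid parent page;
# illustrative only):
#
#     SingletonPage.can_create_at(parent)             # -> False (explicit override)
#     SingletonPageViaMaxCount.can_create_at(parent)  # -> False (enforced via max_count)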
class PageChooserModel(models.Model):
page = models.ForeignKey('wagtailcore.Page', help_text='help text', on_delete=models.CASCADE)
class EventPageChooserModel(models.Model):
page = models.ForeignKey('tests.EventPage', help_text='more help text', on_delete=models.CASCADE)
class SnippetChooserModel(models.Model):
advert = models.ForeignKey(Advert, help_text='help text', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('advert'),
]
class SnippetChooserModelWithCustomPrimaryKey(models.Model):
advertwithcustomprimarykey = models.ForeignKey(AdvertWithCustomPrimaryKey, help_text='help text', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('advertwithcustomprimarykey'),
]
class CustomImage(AbstractImage):
caption = models.CharField(max_length=255, blank=True)
fancy_caption = RichTextField(blank=True)
not_editable_field = models.CharField(max_length=255, blank=True)
admin_form_fields = Image.admin_form_fields + (
'caption',
'fancy_caption',
)
class Meta:
unique_together = [
('title', 'collection')
]
class CustomRendition(AbstractRendition):
image = models.ForeignKey(CustomImage, related_name='renditions', on_delete=models.CASCADE)
class Meta:
unique_together = (
('image', 'filter_spec', 'focal_point_key'),
)
# Custom image model with a required field
class CustomImageWithAuthor(AbstractImage):
author = models.CharField(max_length=255)
admin_form_fields = Image.admin_form_fields + (
'author',
)
class CustomRenditionWithAuthor(AbstractRendition):
image = models.ForeignKey(CustomImageWithAuthor, related_name='renditions', on_delete=models.CASCADE)
class Meta:
unique_together = (
('image', 'filter_spec', 'focal_point_key'),
)
class CustomDocument(AbstractDocument):
description = models.TextField(blank=True)
fancy_description = RichTextField(blank=True)
admin_form_fields = Document.admin_form_fields + (
'description',
'fancy_description'
)
class Meta:
unique_together = [
('title', 'collection')
]
# Custom document model with a required field
class CustomDocumentWithAuthor(AbstractDocument):
author = models.CharField(max_length=255)
admin_form_fields = Document.admin_form_fields + (
'author',
)
class StreamModel(models.Model):
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
])
class MinMaxCountStreamModel(models.Model):
body = StreamField(
[
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
],
min_num=2, max_num=5
)
class BlockCountsStreamModel(models.Model):
body = StreamField(
[
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
],
block_counts={
"text": {"min_num": 1},
"rich_text": {"max_num": 1},
"image": {"min_num": 1, "max_num": 1},
}
)
class ExtendedImageChooserBlock(ImageChooserBlock):
"""
Example of Block with custom get_api_representation method.
If the request has an 'extended' query param, it returns a dict of id and title,
otherwise, it returns the default value.
"""
def get_api_representation(self, value, context=None):
image_id = super().get_api_representation(value, context=context)
if 'request' in context and context['request'].query_params.get('extended', False):
return {
'id': image_id,
'title': value.title
}
return image_id
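# Sketch of the two payload shapes the block above can emit (id and title are
# illustrative; the surrounding API serializer is assumed):
#
#     without ?extended    -> 42
#     with    ?extended=1  -> {'id': 42, 'title': 'Some image'}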
class StreamPage(Page):
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ExtendedImageChooserBlock()),
('product', StructBlock([
('name', CharBlock()),
('price', CharBlock()),
])),
('raw_html', RawHTMLBlock()),
('books', StreamBlock([
('title', CharBlock()),
('author', CharBlock()),
])),
])
api_fields = ('body',)
content_panels = [
FieldPanel('title'),
StreamFieldPanel('body'),
]
preview_modes = []
class DefaultStreamPage(Page):
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
], default='')
content_panels = [
FieldPanel('title'),
StreamFieldPanel('body'),
]
class MTIBasePage(Page):
is_creatable = False
class Meta:
verbose_name = "MTI Base page"
class MTIChildPage(MTIBasePage):
# Should be creatable by default, no need to set anything
pass
class AbstractPage(Page):
class Meta:
abstract = True
@register_setting
class TestSetting(BaseSetting):
title = models.CharField(max_length=100)
email = models.EmailField(max_length=50)
@register_setting
class ImportantPages(BaseSetting):
sign_up_page = models.ForeignKey(
'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
general_terms_page = models.ForeignKey(
'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
privacy_policy_page = models.ForeignKey(
'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
@register_setting(icon="tag")
class IconSetting(BaseSetting):
pass
class NotYetRegisteredSetting(BaseSetting):
pass
@register_setting
class FileUploadSetting(BaseSetting):
file = models.FileField()
class BlogCategory(models.Model):
name = models.CharField(unique=True, max_length=80)
class BlogCategoryBlogPage(models.Model):
category = models.ForeignKey(BlogCategory, related_name="+", on_delete=models.CASCADE)
page = ParentalKey('ManyToManyBlogPage', related_name='categories', on_delete=models.CASCADE)
panels = [
FieldPanel('category'),
]
class ManyToManyBlogPage(Page):
"""
A page type with two different kinds of M2M relation.
We don't formally support these, but we don't want them to cause
hard breakages either.
"""
body = RichTextField(blank=True)
adverts = models.ManyToManyField(Advert, blank=True)
blog_categories = models.ManyToManyField(
BlogCategory, through=BlogCategoryBlogPage, blank=True)
# make first_published_at editable on this page model
settings_panels = Page.settings_panels + [
FieldPanel('first_published_at'),
]
class OneToOnePage(Page):
"""
    A Page containing an O2O relation.
"""
body = RichTextBlock(blank=True)
page_ptr = models.OneToOneField(Page, parent_link=True,
related_name='+', on_delete=models.CASCADE)
class GenericSnippetPage(Page):
"""
A page containing a reference to an arbitrary snippet (or any model for that matter)
linked by a GenericForeignKey
"""
snippet_content_type = models.ForeignKey(ContentType, on_delete=models.SET_NULL, null=True)
snippet_object_id = models.PositiveIntegerField(null=True)
snippet_content_object = GenericForeignKey('snippet_content_type', 'snippet_object_id')
class CustomImageFilePath(AbstractImage):
def get_upload_to(self, filename):
"""Create a path that's file-system friendly.
By hashing the file's contents we guarantee an equal distribution
of files within our root directories. This also gives us a
better chance of uploading images with the same filename, but
different contents - this isn't guaranteed as we're only using
the first three characters of the checksum.
"""
original_filepath = super().get_upload_to(filename)
folder_name, filename = original_filepath.split(os.path.sep)
        # Ensure that we consume the entire file; we can't guarantee that
        # the stream has not been partially (or entirely) consumed by
        # another process
original_position = self.file.tell()
self.file.seek(0)
hash256 = hashlib.sha256()
while True:
data = self.file.read(256)
if not data:
break
hash256.update(data)
checksum = hash256.hexdigest()
self.file.seek(original_position)
return os.path.join(folder_name, checksum[:3], filename)
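# Illustrative result of the override above (filename and digest made up): a file
# "photo.jpg" whose contents hash to "a1b2c3..." would be stored at something like
#
#     original_images/a1b/photo.jpg
#
# i.e. the first three hex characters of the SHA-256 digest add a directory level
# between the default upload folder and the filename.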
class CustomPageQuerySet(PageQuerySet):
def about_spam(self):
return self.filter(title__contains='spam')
CustomManager = PageManager.from_queryset(CustomPageQuerySet)
class CustomManagerPage(Page):
objects = CustomManager()
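# Usage sketch for the custom manager/queryset above (titles illustrative):
#
#     CustomManagerPage.objects.about_spam()
#     # -> queryset of CustomManagerPage instances whose title contains "spam"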
class MyBasePage(Page):
"""
A base Page model, used to set site-wide defaults and overrides.
"""
objects = CustomManager()
class Meta:
abstract = True
class MyCustomPage(MyBasePage):
pass
class ValidatedPage(Page):
foo = models.CharField(max_length=255)
base_form_class = ValidatedPageForm
content_panels = Page.content_panels + [
FieldPanel('foo'),
]
class DefaultRichTextFieldPage(Page):
body = RichTextField()
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('body'),
]
class DefaultRichBlockFieldPage(Page):
body = StreamField([
('rich_text', RichTextBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body')
]
class CustomRichTextFieldPage(Page):
body = RichTextField(editor='custom')
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('body'),
]
class CustomRichBlockFieldPage(Page):
body = StreamField([
('rich_text', RichTextBlock(editor='custom')),
])
content_panels = [
FieldPanel('title', classname="full title"),
StreamFieldPanel('body'),
]
class RichTextFieldWithFeaturesPage(Page):
body = RichTextField(features=['quotation', 'embed', 'made-up-feature'])
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('body'),
]
# a page that only contains RichTextField within an InlinePanel,
# to test that the inline child's form media gets pulled through
class SectionedRichTextPageSection(Orderable):
page = ParentalKey('tests.SectionedRichTextPage', related_name='sections', on_delete=models.CASCADE)
body = RichTextField()
panels = [
FieldPanel('body')
]
class SectionedRichTextPage(Page):
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('sections')
]
class InlineStreamPageSection(Orderable):
page = ParentalKey('tests.InlineStreamPage', related_name='sections', on_delete=models.CASCADE)
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
])
panels = [
StreamFieldPanel('body')
]
class InlineStreamPage(Page):
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('sections')
]
class TableBlockStreamPage(Page):
table = StreamField([('table', TableBlock())])
content_panels = [StreamFieldPanel('table')]
class UserProfile(models.Model):
# Wagtail's schema must be able to coexist alongside a custom UserProfile model
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
favourite_colour = models.CharField(max_length=255)
class PanelSettings(TestSetting):
panels = [
FieldPanel('title')
]
class TabbedSettings(TestSetting):
edit_handler = TabbedInterface([
ObjectList([
FieldPanel('title')
], heading='First tab'),
ObjectList([
FieldPanel('email')
], heading='Second tab'),
])
class AlwaysShowInMenusPage(Page):
show_in_menus_default = True
# test for AddField migrations on StreamFields using various default values
class AddedStreamFieldWithoutDefaultPage(Page):
body = StreamField([
('title', CharBlock())
])
class AddedStreamFieldWithEmptyStringDefaultPage(Page):
body = StreamField([
('title', CharBlock())
], default='')
class AddedStreamFieldWithEmptyListDefaultPage(Page):
body = StreamField([
('title', CharBlock())
], default=[])
# test customising edit handler definitions on a per-request basis
class PerUserContentPanels(ObjectList):
def _replace_children_with_per_user_config(self):
self.children = self.instance.basic_content_panels
if self.request.user.is_superuser:
self.children = self.instance.superuser_content_panels
self.children = [
child.bind_to(model=self.model, instance=self.instance,
request=self.request, form=self.form)
for child in self.children]
def on_instance_bound(self):
# replace list of children when both instance and request are available
if self.request:
self._replace_children_with_per_user_config()
else:
super().on_instance_bound()
def on_request_bound(self):
# replace list of children when both instance and request are available
if self.instance:
self._replace_children_with_per_user_config()
else:
super().on_request_bound()
class PerUserPageMixin:
basic_content_panels = []
superuser_content_panels = []
@cached_classmethod
def get_edit_handler(cls):
tabs = []
if cls.basic_content_panels and cls.superuser_content_panels:
tabs.append(PerUserContentPanels(heading='Content'))
if cls.promote_panels:
tabs.append(ObjectList(cls.promote_panels,
heading='Promote'))
if cls.settings_panels:
tabs.append(ObjectList(cls.settings_panels,
heading='Settings',
classname='settings'))
edit_handler = TabbedInterface(tabs,
base_form_class=cls.base_form_class)
return edit_handler.bind_to(model=cls)
class SecretPage(PerUserPageMixin, Page):
boring_data = models.TextField()
secret_data = models.TextField()
basic_content_panels = Page.content_panels + [
FieldPanel('boring_data'),
]
superuser_content_panels = basic_content_panels + [
FieldPanel('secret_data'),
]
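# Rough effect of the per-user panels above (the request objects are assumptions):
#
#     handler = SecretPage.get_edit_handler()
#     handler.bind_to(instance=page, request=superuser_request, form=form)
#     # -> children include 'boring_data' and 'secret_data'
#     handler.bind_to(instance=page, request=editor_request, form=form)
#     # -> children include only 'boring_data'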
class SimpleParentPage(Page):
# `BusinessIndex` has been added to bring it in line with other tests
subpage_types = ['tests.SimpleChildPage', BusinessIndex]
class SimpleChildPage(Page):
# `Page` has been added to bring it in line with other tests
parent_page_types = ['tests.SimpleParentPage', Page]
max_count_per_parent = 1
class PersonPage(Page):
first_name = models.CharField(
max_length=255,
verbose_name='First Name',
)
last_name = models.CharField(
max_length=255,
verbose_name='Last Name',
)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('first_name'),
FieldPanel('last_name'),
], 'Person'),
InlinePanel('addresses', label='Address'),
]
class Meta:
verbose_name = 'Person'
verbose_name_plural = 'Persons'
class Address(index.Indexed, ClusterableModel, Orderable):
address = models.CharField(
max_length=255,
verbose_name='Address',
)
tags = ClusterTaggableManager(
through='tests.AddressTag',
blank=True,
)
person = ParentalKey(
to='tests.PersonPage',
related_name='addresses',
verbose_name='Person'
)
panels = [
FieldPanel('address'),
FieldPanel('tags'),
]
class Meta:
verbose_name = 'Address'
verbose_name_plural = 'Addresses'
class AddressTag(TaggedItemBase):
content_object = ParentalKey(
to='tests.Address',
on_delete=models.CASCADE,
related_name='tagged_items'
)
class RestaurantPage(Page):
tags = ClusterTaggableManager(through='tests.TaggedRestaurant', blank=True)
content_panels = Page.content_panels + [
FieldPanel('tags'),
]
class RestaurantTag(TagBase):
free_tagging = False
class Meta:
verbose_name = "Tag"
verbose_name_plural = "Tags"
class TaggedRestaurant(ItemBase):
tag = models.ForeignKey(
RestaurantTag, related_name="tagged_restaurants", on_delete=models.CASCADE
)
content_object = ParentalKey(
to='tests.RestaurantPage',
on_delete=models.CASCADE,
related_name='tagged_items'
)
class SimpleTask(Task):
pass
# StreamField media definitions must not be evaluated at startup (e.g. during system checks) -
# these may fail if e.g. ManifestStaticFilesStorage is in use and collectstatic has not been run.
# Check this with a media definition that deliberately errors; if media handling is not set up
# correctly, then the mere presence of this model definition will cause startup to fail.
class DeadlyTextInput(forms.TextInput):
@property
def media(self):
raise Exception("BOOM! Attempted to evaluate DeadlyTextInput.media")
class DeadlyCharBlock(FieldBlock):
def __init__(self, *args, **kwargs):
self.field = forms.CharField(widget=DeadlyTextInput())
super().__init__(*args, **kwargs)
class DeadlyStreamPage(Page):
body = StreamField([
('title', DeadlyCharBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
# Check that get_image_model and get_document_model work at import time
# (so that it's possible to use them in foreign key definitions, for example)
ReimportedImageModel = get_image_model()
ReimportedDocumentModel = get_document_model()
|
|
import collections
import operator
import pytest
from pandas.compat import PY36
import pandas as pd
from pandas.tests.extension import base
import pandas.util.testing as tm
from .array import JSONArray, JSONDtype, make_data
@pytest.fixture
def dtype():
return JSONDtype()
@pytest.fixture
def data():
    """Length-100 JSONArray for semantics test."""
data = make_data()
# Why the while loop? NumPy is unable to construct an ndarray from
# equal-length ndarrays. Many of our operations involve coercing the
# EA to an ndarray of objects. To avoid random test failures, we ensure
# that our data is coercible to an ndarray. Several tests deal with only
# the first two elements, so that's what we'll check.
while len(data[0]) == len(data[1]):
data = make_data()
return JSONArray(data)
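# Why the length check matters, sketched with plain NumPy (the fixture's records are
# dicts; tuples are used here only to illustrate the coercion problem):
#
#     >>> import numpy as np
#     >>> np.array([(1, 2), (3, 4)], dtype=object).shape      # equal lengths
#     (2, 2)
#     >>> np.array([(1, 2), (3, 4, 5)], dtype=object).shape   # ragged lengths
#     (2,)
#
# Equal-length records collapse into a 2-D array rather than a 1-D array of objects,
# which is exactly what the while loop above guards against.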
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return JSONArray([{}, {"a": 10}])
@pytest.fixture
def data_for_sorting():
return JSONArray([{"b": 1}, {"c": 4}, {"a": 2, "c": 3}])
@pytest.fixture
def data_missing_for_sorting():
return JSONArray([{"b": 1}, {}, {"a": 4}])
@pytest.fixture
def na_value(dtype):
return dtype.na_value
@pytest.fixture
def na_cmp():
return operator.eq
@pytest.fixture
def data_for_grouping():
return JSONArray(
[
{"b": 1},
{"b": 1},
{},
{},
{"a": 0, "c": 2},
{"a": 0, "c": 2},
{"b": 1},
{"c": 2},
]
)
class BaseJSON:
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
def assert_series_equal(self, left, right, **kwargs):
if left.dtype.name == "json":
assert left.dtype == right.dtype
left = pd.Series(
JSONArray(left.values.astype(object)), index=left.index, name=left.name
)
right = pd.Series(
JSONArray(right.values.astype(object)),
index=right.index,
name=right.name,
)
tm.assert_series_equal(left, right, **kwargs)
def assert_frame_equal(self, left, right, *args, **kwargs):
tm.assert_index_equal(
left.columns,
right.columns,
exact=kwargs.get("check_column_type", "equiv"),
check_names=kwargs.get("check_names", True),
check_exact=kwargs.get("check_exact", False),
check_categorical=kwargs.get("check_categorical", True),
obj="{obj}.columns".format(obj=kwargs.get("obj", "DataFrame")),
)
jsons = (left.dtypes == "json").index
for col in jsons:
self.assert_series_equal(left[col], right[col], *args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
tm.assert_frame_equal(left, right, *args, **kwargs)
class TestDtype(BaseJSON, base.BaseDtypeTests):
pass
class TestInterface(BaseJSON, base.BaseInterfaceTests):
def test_custom_asserts(self):
# This would always trigger the KeyError from trying to put
# an array of equal-length UserDicts inside an ndarray.
data = JSONArray(
[
collections.UserDict({"a": 1}),
collections.UserDict({"b": 2}),
collections.UserDict({"c": 3}),
]
)
a = pd.Series(data)
self.assert_series_equal(a, a)
self.assert_frame_equal(a.to_frame(), a.to_frame())
b = pd.Series(data.take([0, 0, 1]))
with pytest.raises(AssertionError):
self.assert_series_equal(a, b)
with pytest.raises(AssertionError):
self.assert_frame_equal(a.to_frame(), b.to_frame())
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
@pytest.mark.skip(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
@pytest.mark.skip(reason="Different definitions of NA")
def test_stack(self):
"""
The test does .astype(object).stack(). If we happen to have
any missing values in `data`, then we'll end up with different
rows since we consider `{}` NA, but `.astype(object)` doesn't.
"""
@pytest.mark.xfail(reason="dict for NA")
def test_unstack(self, data, index):
# The base test has NaN for the expected NA value.
# this matches otherwise
return super().test_unstack(data, index)
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
class TestMissing(BaseJSON, base.BaseMissingTests):
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
unhashable = pytest.mark.skip(reason="Unhashable")
unstable = pytest.mark.skipif(
not PY36, reason="Dictionary order unstable" # 3.6 or higher
)
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseJSON, base.BaseMethodsTests):
@unhashable
def test_value_counts(self, all_data, dropna):
pass
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
pass
@unstable
def test_argsort(self, data_for_sorting):
super().test_argsort(data_for_sorting)
@unstable
def test_argsort_missing(self, data_missing_for_sorting):
super().test_argsort_missing(data_missing_for_sorting)
@unstable
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending):
super().test_sort_values(data_for_sorting, ascending)
@unstable
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(self, data_missing_for_sorting, ascending):
super().test_sort_values_missing(data_missing_for_sorting, ascending)
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_le(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_add(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_first(self, data):
pass
@unhashable
def test_hash_pandas_object_works(self, data, kind):
super().test_hash_pandas_object_works(data, kind)
@pytest.mark.skip(reason="broadcasting error")
def test_where_series(self, data, na_value):
# Fails with
# *** ValueError: operands could not be broadcast together
# with shapes (4,) (4,) (0,)
super().test_where_series(data, na_value)
@pytest.mark.skip(reason="Can't compare dicts.")
def test_searchsorted(self, data_for_sorting):
super().test_searchsorted(data_for_sorting)
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
*** ValueError: setting an array element with a sequence
"""
# We intentionally don't run base.BaseSetitemTests because pandas'
# internals has trouble setting sequences of values into scalar positions.
class TestGroupby(BaseJSON, base.BaseGroupbyTests):
@unhashable
def test_groupby_extension_transform(self):
"""
This currently fails in Series.name.setter, since the
name must be hashable, but the value is a dictionary.
I think this is what we want, i.e. `.name` should be the original
values, and not the values for factorization.
"""
@unhashable
def test_groupby_extension_apply(self):
"""
This fails in Index._do_unique_check with
> hash(val)
E TypeError: unhashable type: 'UserDict' with
I suspect that once we support Index[ExtensionArray],
we'll be able to dispatch unique.
"""
@unstable
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super().test_groupby_extension_agg(as_index, data_for_grouping)
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_error(self, data, all_arithmetic_operators):
pass
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="unsupported"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
class TestPrinting(BaseJSON, base.BasePrintingTests):
pass
|
|
from __future__ import absolute_import
from six.moves.urllib.parse import urlparse
from django.utils.translation import ugettext_lazy as _
from django import forms
from sentry import http
from sentry.web.helpers import render_to_response
from sentry.identity.pipeline import IdentityProviderPipeline
from sentry.identity.github_enterprise import get_user_info
from sentry.integrations import IntegrationMetadata, Integration
from sentry.integrations.constants import ERR_INTERNAL, ERR_UNAUTHORIZED
from sentry.integrations.exceptions import ApiError
from sentry.integrations.repositories import RepositoryMixin
from sentry.pipeline import NestedPipelineView, PipelineView
from sentry.utils.http import absolute_uri
from sentry.integrations.github.integration import GitHubIntegrationProvider
from sentry.integrations.github.issues import GitHubIssueBasic
from sentry.integrations.github.utils import get_jwt
from .repository import GitHubEnterpriseRepositoryProvider
from .client import GitHubEnterpriseAppsClient
DESCRIPTION = """
Define a relationship between Sentry and GitHub Enterprise.
* Authorize repositories to be added for syncing commit data.
* Create or link existing GitHub Enterprise issues.
"""
metadata = IntegrationMetadata(
description=DESCRIPTION.strip(),
author='The Sentry Team',
noun=_('Installation'),
issue_url='https://github.com/getsentry/sentry/issues/new?title=GitHub%20Integration:%20&labels=Component%3A%20Integrations',
source_url='https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/github_enterprise',
aspects={}
)
API_ERRORS = {
404: 'GitHub Enterprise returned a 404 Not Found error.',
401: ERR_UNAUTHORIZED,
}
class GitHubEnterpriseIntegration(Integration, GitHubIssueBasic, RepositoryMixin):
def get_client(self):
base_url = urlparse(self.model.metadata['domain_name']).netloc
return GitHubEnterpriseAppsClient(
base_url=base_url,
integration=self.model,
private_key=self.model.metadata['installation']['private_key'],
app_id=self.model.metadata['installation']['id'],
)
def get_repositories(self):
return self.get_client().get_repositories()
def message_from_error(self, exc):
if isinstance(exc, ApiError):
message = API_ERRORS.get(exc.code)
if message:
return message
            return (
                'Error Communicating with GitHub Enterprise (HTTP %s): %s' % (
                    exc.code,
                    exc.json.get('message', 'unknown error') if exc.json else 'unknown error',
                )
            )
else:
return ERR_INTERNAL
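# Illustrative mapping for message_from_error above (codes and payloads made up):
#
#     ApiError with code 404                       -> the 404 text from API_ERRORS
#     ApiError with code 502 and json {'message': 'bad gateway'}
#         -> 'Error Communicating with GitHub Enterprise (HTTP 502): bad gateway'
#     anything that is not an ApiError             -> ERR_INTERNAL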
class InstallationForm(forms.Form):
url = forms.CharField(widget=forms.TextInput(
attrs={
'label': "Installation Url",
'placeholder': _('https://github.example.com'),
}
))
id = forms.CharField(widget=forms.TextInput(
attrs={
'label': "Github App ID",
'placeholder': _('1'),
}
))
name = forms.CharField(widget=forms.TextInput(
attrs={
'label': "Github App Name",
'placeholder': _('sentry-app'),
}
))
client_id = forms.CharField(widget=forms.TextInput(
attrs={
'label': "Github App Client ID",
'placeholder': _('1'),
}
))
client_secret = forms.CharField(widget=forms.TextInput(
attrs={
'label': "Github App Client Secret",
'placeholder': _('XXXXXXXXXXXXXXXXXXXXXXXXXXX'),
}
))
webhook_secret = forms.CharField(required=False, widget=forms.TextInput(
attrs={
'label': "Github App Webhook Secret",
'placeholder': _('XXXXXXXXXXXXXXXXXXXXXXXXXXX'),
}
))
private_key = forms.CharField(
widget=forms.Textarea(attrs={'rows': '60',
'label': "Github App Private Key",
'placeholder': _("-----BEGIN RSA PRIVATE KEY-----\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"-----END RSA PRIVATE KEY-----"), }
))
def __init__(self, *args, **kwargs):
super(InstallationForm, self).__init__(*args, **kwargs)
class InstallationConfigView(PipelineView):
def dispatch(self, request, pipeline):
form = InstallationForm(request.POST)
if form.is_valid():
form_data = form.cleaned_data
form_data['url'] = urlparse(form_data['url']).netloc
pipeline.bind_state('installation_data', form_data)
pipeline.bind_state('oauth_config_information', {
"access_token_url": "https://{}/login/oauth/access_token".format(form_data.get('url')),
"authorize_url": "https://{}/login/oauth/authorize".format(form_data.get('url')),
"client_id": form_data.get('client_id'),
"client_secret": form_data.get('client_secret'),
})
return pipeline.next_step()
project_form = InstallationForm()
return render_to_response(
template='sentry/integrations/github-enterprise-config.html',
context={
'form': project_form,
},
request=request,
)
class GitHubEnterpriseIntegrationProvider(GitHubIntegrationProvider):
key = 'github_enterprise'
name = 'GitHub Enterprise'
metadata = metadata
integration_cls = GitHubEnterpriseIntegration
def _make_identity_pipeline_view(self):
"""
Make the nested identity provider view. It is important that this view is
not constructed until we reach this step and the
``oauth_config_information`` is available in the pipeline state. This
        method should be late bound into the pipeline views.
"""
identity_pipeline_config = dict(
oauth_scopes=(),
redirect_url=absolute_uri('/extensions/github-enterprise/setup/'),
verify_ssl=False,
**self.pipeline.fetch_state('oauth_config_information')
)
return NestedPipelineView(
bind_key='identity',
provider_key='github_enterprise',
pipeline_cls=IdentityProviderPipeline,
config=identity_pipeline_config,
)
def get_pipeline_views(self):
return [InstallationConfigView(),
GitHubEnterpriseInstallationRedirect(),
# The identity provider pipeline should be constructed at execution
# time, this allows for the oauth configuration parameters to be made
# available from the installation config view.
lambda: self._make_identity_pipeline_view()]
def get_installation_info(self, installation_data, access_token, installation_id):
session = http.build_session()
resp = session.get(
'https://{}/api/v3/app/installations/{}'.format(
installation_data['url'], installation_id),
headers={
'Authorization': 'Bearer %s' % get_jwt(github_id=installation_data['id'], github_private_key=installation_data['private_key']),
'Accept': 'application/vnd.github.machine-man-preview+json',
},
verify=False
)
resp.raise_for_status()
installation_resp = resp.json()
resp = session.get(
'https://{}/api/v3/user/installations'.format(installation_data['url']),
params={'access_token': access_token},
headers={'Accept': 'application/vnd.github.machine-man-preview+json'},
verify=False
)
resp.raise_for_status()
user_installations_resp = resp.json()
# verify that user actually has access to the installation
for installation in user_installations_resp['installations']:
if installation['id'] == installation_resp['id']:
return installation_resp
return None
def build_integration(self, state):
identity = state['identity']['data']
installation_data = state['installation_data']
user = get_user_info(installation_data['url'], identity['access_token'])
installation = self.get_installation_info(
installation_data,
identity['access_token'],
state['installation_id'])
domain = urlparse(installation['account']['html_url']).netloc
return {
'name': installation['account']['login'],
# installation id is not enough to be unique for self-hosted GH
'external_id': '{}:{}'.format(domain, installation['id']),
# GitHub identity is associated directly to the application, *not*
# to the installation itself.
# app id is not enough to be unique for self-hosted GH
'idp_external_id': '{}:{}'.format(domain, installation['app_id']),
'metadata': {
# The access token will be populated upon API usage
'access_token': None,
'expires_at': None,
'icon': installation['account']['avatar_url'],
'domain_name': installation['account']['html_url'],
'installation_id': installation['id'],
'installation': installation_data
},
'user_identity': {
'type': 'github_enterprise',
'external_id': user['id'],
'scopes': [], # GitHub apps do not have user scopes
'data': {'access_token': identity['access_token']},
},
'idp_config': state['oauth_config_information']
}
def setup(self):
from sentry.plugins import bindings
bindings.add(
'integration-repository.provider',
GitHubEnterpriseRepositoryProvider,
id='integrations:github_enterprise',
)
class GitHubEnterpriseInstallationRedirect(PipelineView):
def get_app_url(self, installation_data):
url = installation_data.get('url')
name = installation_data.get('name')
return 'https://{}/github-apps/{}'.format(url, name)
def dispatch(self, request, pipeline):
installation_data = pipeline.fetch_state(key='installation_data')
if 'installation_id' in request.GET:
pipeline.bind_state('installation_id', request.GET['installation_id'])
return pipeline.next_step()
return self.redirect(self.get_app_url(installation_data))
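# For reference, the app URL built by get_app_url() above looks roughly like
# (host and app name illustrative):
#
#     https://github.example.com/github-apps/sentry-app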
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves content for "script" handlers using the PHP runtime."""
import cgi
import logging
import os
import re
import subprocess
import sys
import google
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import http_runtime
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import safe_subprocess
_RUNTIME_PATH = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '_php_runtime.py')
)
_CHECK_ENVIRONMENT_SCRIPT_PATH = os.path.join(
os.path.dirname(__file__), 'php', 'check_environment.php')
_RUNTIME_ARGS = [sys.executable, _RUNTIME_PATH]
class _PHPBinaryError(Exception):
pass
class _PHPEnvironmentError(Exception):
pass
class _BadPHPEnvironmentRuntimeProxy(instance.RuntimeProxy):
"""Serves an error page describing the problem with the user's PHP setup."""
def __init__(self, php_executable_path, exception):
self._php_executable_path = php_executable_path
self._exception = exception
def start(self):
pass
def quit(self):
pass
def handle(self, environ, start_response, url_map, match, request_id,
request_type):
"""Serves a request by displaying an error page.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler matching this request.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Yields:
A sequence of strings containing the body of the HTTP response.
"""
start_response('500 Internal Server Error',
[('Content-Type', 'text/html')])
yield '<html><head><title>Invalid PHP Configuration</title></head>'
yield '<body>'
yield '<title>Invalid PHP Configuration</title>'
if isinstance(self._exception, _PHPEnvironmentError):
yield '<b>The PHP interpreter specified with the --php_executable_path '
yield ' flag ("%s") is not compatible with the App Engine ' % (
self._php_executable_path)
yield 'PHP development environment.</b><br>'
yield '<br>'
yield '<pre>%s</pre>' % self._exception
else:
yield '<b>%s</b>' % cgi.escape(str(self._exception))
yield '</body></html>'
class PHPRuntimeInstanceFactory(instance.InstanceFactory):
"""A factory that creates new PHP runtime Instances."""
# A mapping from a php executable path to the _BadPHPEnvironmentRuntimeProxy
  # describing why it is not usable. If the php executable is usable then the
# path will map to None. Only one PHP executable will be used in a run of the
# development server but that is not necessarily the case for tests.
_php_binary_to_error_proxy = {}
# TODO: Use real script values.
START_URL_MAP = appinfo.URLMap(
url='/_ah/start',
script='$PHP_LIB/default_start_handler',
login='admin')
WARMUP_URL_MAP = appinfo.URLMap(
url='/_ah/warmup',
script='$PHP_LIB/default_warmup_handler',
login='admin')
FILE_CHANGE_INSTANCE_RESTART_POLICY = instance.NEVER
def __init__(self, request_data, runtime_config_getter, module_configuration):
"""Initializer for PHPRuntimeInstanceFactory.
Args:
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
runtime_config_getter: A function that can be called without arguments
and returns the runtime_config_pb2.Config containing the configuration
for the runtime.
module_configuration: An application_configuration.ModuleConfiguration
          instance representing the configuration of the module that owns the
runtime.
"""
super(PHPRuntimeInstanceFactory, self).__init__(
request_data, 8 if runtime_config_getter().threadsafe else 1)
self._runtime_config_getter = runtime_config_getter
self._module_configuration = module_configuration
self._bad_environment_proxy = None
@staticmethod
def _check_environment(php_executable_path):
if php_executable_path is None:
raise _PHPBinaryError('The development server must be started with the '
'--php_executable_path flag set to the path of the '
'php-cgi binary.')
if not os.path.exists(php_executable_path):
      raise _PHPBinaryError('The path specified with the --php_executable_path '
'flag (%s) does not exist.' % php_executable_path)
if not os.access(php_executable_path, os.X_OK):
      raise _PHPBinaryError('The path specified with the --php_executable_path '
'flag (%s) is not executable' % php_executable_path)
version_process = safe_subprocess.start_process([php_executable_path, '-v'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={})
version_stdout, version_stderr = version_process.communicate()
if version_process.returncode:
raise _PHPEnvironmentError(
'"%s -v" returned an error [%d]\n%s%s' % (
php_executable_path,
version_process.returncode,
version_stderr,
version_stdout))
version_match = re.search(r'PHP (\d+).(\d+)', version_stdout)
if version_match is None:
raise _PHPEnvironmentError(
'"%s -v" returned an unexpected version string:\n%s%s' % (
php_executable_path,
version_stderr,
version_stdout))
version = tuple(int(v) for v in version_match.groups())
if version < (5, 4):
raise _PHPEnvironmentError(
'The PHP interpreter must be version >= 5.4, %d.%d found' % version)
check_process = safe_subprocess.start_process(
[php_executable_path, '-f', _CHECK_ENVIRONMENT_SCRIPT_PATH],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={})
check_process_stdout, _ = check_process.communicate()
if check_process.returncode:
raise _PHPEnvironmentError(check_process_stdout)
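  # Sketch of what the checks above accept (version strings illustrative):
  #
  #     "PHP 5.5.9-1ubuntu4 (cgi-fcgi) ..."  -> parsed as (5, 5), accepted
  #     "PHP 5.3.29 (cgi-fcgi) ..."          -> parsed as (5, 3), _PHPEnvironmentError
  #     output with no "PHP X.Y" at all      -> _PHPEnvironmentError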
def new_instance(self, instance_id, expect_ready_request=False):
"""Create and return a new Instance.
Args:
instance_id: A string or integer representing the unique (per module) id
of the instance.
expect_ready_request: If True then the instance will be sent a special
request (i.e. /_ah/warmup or /_ah/start) before it can handle external
requests.
Returns:
The newly created instance.Instance.
"""
def instance_config_getter():
runtime_config = self._runtime_config_getter()
runtime_config.instance_id = str(instance_id)
return runtime_config
php_executable_path = (
self._runtime_config_getter().php_config.php_executable_path)
if php_executable_path not in self._php_binary_to_error_proxy:
try:
self._check_environment(php_executable_path)
except Exception as e:
self._php_binary_to_error_proxy[php_executable_path] = (
_BadPHPEnvironmentRuntimeProxy(php_executable_path, e))
logging.exception('The PHP runtime is not available')
else:
self._php_binary_to_error_proxy[php_executable_path] = None
proxy = self._php_binary_to_error_proxy[php_executable_path]
if proxy is None:
proxy = http_runtime.HttpRuntimeProxy(_RUNTIME_ARGS,
instance_config_getter,
self._module_configuration)
return instance.Instance(self.request_data,
instance_id,
proxy,
self.max_concurrent_requests,
self.max_background_threads,
expect_ready_request)
|
|
from __future__ import absolute_import, unicode_literals
from collections import Counter
from itertools import chain, groupby
import json
from operator import itemgetter
import numpy as np
import pytz
from django.conf import settings
from django.db import models, transaction
from django.db.models import Count, Q, F
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from tracpro.charts.utils import midnight, end_of_day
from tracpro.client import get_client
from tracpro.contacts.models import Contact, NoMatchingCohortsWarning, NoContactInRapidProWarning
from tracpro.utils import dunder_to_chained_attrs
from . import rules
from .tasks import pollrun_start
from .utils import extract_words, natural_sort_key, get_numeric_values
SAMEDAY_LAST = 'use_last'
SAMEDAY_SUM = 'sum'
class PollQuerySet(models.QuerySet):
def active(self):
return self.filter(is_active=True)
def by_org(self, org):
return self.filter(org=org)
class PollManager(models.Manager.from_queryset(PollQuerySet)):
def from_temba(self, org, temba_poll):
"""
Create new or update existing Poll from RapidPro data.
:param TembaFlow temba_poll:
"""
poll, _ = self.get_or_create(org=org, flow_uuid=temba_poll.uuid)
if poll.name == poll.rapidpro_name:
# Name is tracking RapidPro name so we must update both.
poll.name = poll.rapidpro_name = temba_poll.name
else:
# Custom name will be maintained despite update of RapidPro name.
poll.rapidpro_name = temba_poll.name
poll.save()
return poll
@transaction.atomic
def set_active_for_org(self, org, uuids):
"""Set matching org Polls to be active, and all others to be inactive.
If an invalid UUID is given, a ValueError is raised and the transaction
is rolled back.
"""
active_count = org.polls.filter(flow_uuid__in=uuids).update(is_active=True)
if active_count != len(uuids):
invalid_uuids = set(uuids) - set(org.polls.values_list('flow_uuid', flat=True))
raise ValueError(
"No Poll for {} matching these UUIDS: {}".format(
org.name, invalid_uuids))
org.polls.exclude(flow_uuid__in=uuids).update(is_active=False)
def sync(self, org):
"""Update the org's Polls from RapidPro."""
# Retrieve current Polls known to RapidPro.
temba_polls_result = get_client(org).get_flows()
# Filter out polls with names starting with 'Single Message'
temba_polls = {}
for poll in temba_polls_result:
if not poll.name.startswith('Single Message'):
temba_polls[poll.uuid] = poll
# Remove Polls that are no longer on RapidPro or that we are filtering out.
org.polls.exclude(flow_uuid__in=temba_polls.keys()).delete()
# Create new or update existing Polls to match RapidPro data.
for temba_poll in temba_polls.values():
Poll.objects.from_temba(org, temba_poll)
@python_2_unicode_compatible
class Poll(models.Model):
"""Corresponds to a RapidPro flow.
Keeping track of contact responses to a flow is data-intensive, so Tracpro
only tracks flows that the user has selected. Selected flows are managed
as Polls.
"""
flow_uuid = models.CharField(max_length=36)
org = models.ForeignKey(
'orgs.Org', related_name='polls', verbose_name=_('org'))
rapidpro_name = models.CharField(
max_length=64, verbose_name=_('RapidPro name'))
name = models.CharField(
max_length=64, blank=True, verbose_name=_('name'))
# Set this to False rather than deleting a Poll. If the user should
# re-select the corresponding flow later, we can avoid re-importing
# existing data.
is_active = models.BooleanField(
default=False, verbose_name=_("show on TracPro"))
objects = PollManager()
class Meta:
unique_together = (
('org', 'flow_uuid'),
)
def __init__(self, *args, **kwargs):
"""Name should default to the RapidPro name."""
super(Poll, self).__init__(*args, **kwargs)
self.name = self.name or self.rapidpro_name
def __str__(self):
return self.name
def save(self, *args, **kwargs):
"""Don't save custom name if it is the same as the RapidPro name.
This allows us to track changes to the name on RapidPro.
"""
self.name = "" if self.name == self.rapidpro_name else self.name.strip()
super(Poll, self).save(*args, **kwargs)
self.name = self.name or self.rapidpro_name
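# Sketch of the name-tracking behaviour above (names illustrative):
#
#     name == rapidpro_name ("Weekly survey")  -> "" is stored, so a later rename
#                                                  in RapidPro flows through on sync
#     name set to "Our weekly check-in"        -> the custom name is stored and
#                                                  survives RapidPro renames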
class QuestionQuerySet(models.QuerySet):
def active(self):
return self.filter(is_active=True)
class QuestionManager(models.Manager.from_queryset(QuestionQuerySet)):
def from_temba(self, poll, temba_question, order):
"""Create new or update existing Question from RapidPro data."""
question, _ = self.get_or_create(poll=poll, ruleset_uuid=temba_question['uuid'])
if question.name == question.rapidpro_name:
# Name is tracking RapidPro name so we must update both.
question.name = question.rapidpro_name = temba_question['label']
else:
# Custom name will be maintained despite update of RapidPro name.
question.rapidpro_name = temba_question['label']
# Save the rules used to categorize answers to this question.
rules = []
for rule in temba_question['rules'][:-1]: # The last rule is always "Other".
rules.append({
'category': rule['category'],
'test': rule['test'],
})
question.json_rules = json.dumps(rules)
# The user can alter or correct the question's type after it is
# initially set, so we shouldn't override the existing type.
if not question.question_type:
question.question_type = question.guess_question_type()
question.order = order
question.save()
return question
@python_2_unicode_compatible
class Question(models.Model):
"""Corresponds to RapidPro RuleSet."""
TYPE_OPEN = 'O'
TYPE_MULTIPLE_CHOICE = 'C'
TYPE_NUMERIC = 'N'
TYPE_MENU = 'M'
TYPE_KEYPAD = 'K'
TYPE_RECORDING = 'R'
TYPE_CHOICES = (
(TYPE_OPEN, _("Open Ended")),
(TYPE_MULTIPLE_CHOICE, _("Multiple Choice")),
(TYPE_NUMERIC, _("Numeric")),
(TYPE_MENU, _("Menu")),
(TYPE_KEYPAD, _("Keypad")),
(TYPE_RECORDING, _("Recording")),
)
ruleset_uuid = models.CharField(max_length=36)
poll = models.ForeignKey(
'polls.Poll', related_name='questions', verbose_name=_('poll'))
rapidpro_name = models.CharField(
max_length=64, verbose_name=_('RapidPro name'))
name = models.CharField(
max_length=64, blank=True, verbose_name=_('name'))
question_type = models.CharField(
max_length=1, choices=TYPE_CHOICES, verbose_name=_('question type'))
order = models.IntegerField(
default=0, verbose_name=_('order'))
is_active = models.BooleanField(
default=True, verbose_name=_("show on TracPro"))
json_rules = models.TextField(
blank=True,
verbose_name=_("RapidPro rules"))
objects = QuestionManager()
class Meta:
ordering = ('order',)
unique_together = (
('ruleset_uuid', 'poll'),
)
def __init__(self, *args, **kwargs):
"""Name should default to the RapidPro name."""
super(Question, self).__init__(*args, **kwargs)
self.name = self.name or self.rapidpro_name
def __str__(self):
return self.name
def categorize(self, value):
"""Return the first category that the value matches."""
for rule in self.get_rules():
if rules.passes_test(value, rule):
return rules.get_category(rule)
return "Other"
def get_rules(self):
if not hasattr(self, "_rules"):
self._rules = json.loads(self.json_rules) if self.json_rules else []
return self._rules
def guess_question_type(self):
"""Inspect rules applied to question input to guess data type.
Historically, the "response_type" field on the ruleset was used to
determine question type. This field appears to have been deprecated
        and currently returns only a limited subset of the possible types.
"""
# Collect the type of each test applied to question input, e.g.,
# "has any of these words", "has a number", "has a number between", etc.
tests = [rule['test']['type'] for rule in self.get_rules()]
if not tests:
return self.TYPE_OPEN
elif all(t in rules.NUMERIC_TESTS for t in tests):
return self.TYPE_NUMERIC
else:
return self.TYPE_MULTIPLE_CHOICE
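    # Sketch of the guess above (the rule test-type strings are assumptions, not an
    # exhaustive list): rules that are all numeric tests, e.g. 'number'/'between',
    # give TYPE_NUMERIC; no rules at all gives TYPE_OPEN; any other mix of tests
    # gives TYPE_MULTIPLE_CHOICE.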
def save(self, *args, **kwargs):
"""Don't save custom name if it is the same as the RapidPro name.
This allows us to track changes to the name on RapidPro.
"""
self.name = "" if self.name == self.rapidpro_name else self.name.strip()
super(Question, self).save(*args, **kwargs)
self.name = self.name or self.rapidpro_name
class PollRunQuerySet(models.QuerySet):
def active(self):
"""Return all active PollRuns."""
return self.filter(poll__is_active=True)
def by_dates(self, start_date=None, end_date=None):
pollruns = self.all()
if start_date:
pollruns = pollruns.filter(conducted_on__gte=start_date)
if end_date:
pollruns = pollruns.filter(conducted_on__lt=end_date)
return pollruns
def by_region(self, region, include_subregions=True):
"""Return all PollRuns for the region."""
if not region:
return self.all()
q = Q(region=region)
# Include PollRuns that include this region as a sub-region.
q |= Q(region__in=region.get_ancestors(),
pollrun_type=PollRun.TYPE_PROPAGATED)
# Include poll runs that weren't sent to a particular region.
q |= Q(region=None)
# Include PollRuns that were sent to the region's sub-regions.
if include_subregions:
q |= Q(region__in=region.get_descendants())
return self.filter(q)
def by_org(self, org):
return self.filter(poll__org=org)
def universal(self):
types = (PollRun.TYPE_UNIVERSAL, PollRun.TYPE_SPOOFED)
return self.filter(pollrun_type__in=types)
class PollRunManager(models.Manager.from_queryset(PollRunQuerySet)):
def create(self, poll, region=None, **kwargs):
if region and poll.org != region.org:
raise ValueError("Region org must match poll org.")
return super(PollRunManager, self).create(poll=poll, region=region, **kwargs)
def create_regional(self, region, do_start=True, **kwargs):
"""Create a poll run for a single region."""
if not region:
raise ValueError("Panel poll requires a non-null panel.")
kwargs['pollrun_type'] = PollRun.TYPE_REGIONAL
pollrun = self.create(region=region, **kwargs)
if do_start:
pollrun_start.delay(pollrun.pk)
return pollrun
def create_propagated(self, region, do_start=True, **kwargs):
"""Create a poll run for a region and its sub-regions."""
if not region:
raise ValueError("Propagated poll requires a non-null panel.")
kwargs['pollrun_type'] = PollRun.TYPE_PROPAGATED
pollrun = self.create(region=region, **kwargs)
if do_start:
pollrun_start.delay(pollrun.pk)
return pollrun
def create_spoofed(self, **kwargs):
kwargs['pollrun_type'] = PollRun.TYPE_SPOOFED
return self.create(**kwargs)
def get_or_create_universal(self, poll, for_date=None, **kwargs):
"""Create a poll run that is for all regions."""
# Get the requested date in the org timezone
for_date = for_date or timezone.now()
if isinstance(poll.org.timezone, basestring):
org_timezone = pytz.timezone(poll.org.timezone)
else:
org_timezone = poll.org.timezone
for_local_date = for_date.astimezone(org_timezone).date()
# look for a non-regional pollrun on that date
sql = ('SELECT * FROM polls_pollrun WHERE poll_id = %s AND '
'region_id IS NULL AND DATE(conducted_on AT TIME ZONE %s) = %s')
params = [poll.pk, org_timezone.zone, for_local_date]
existing = list(PollRun.objects.raw(sql, params))
if existing:
return existing[0]
kwargs['poll'] = poll
kwargs['region'] = None
kwargs['pollrun_type'] = PollRun.TYPE_UNIVERSAL
kwargs['conducted_on'] = for_date
return self.create(**kwargs)
def get_all(self, org, region, include_subregions=True):
"""
Get all active PollRuns for the region, plus sub-regions if
specified.
"""
qs = self.get_queryset().active().by_org(org)
qs = qs.select_related('poll', 'region')
qs = qs.by_region(region, include_subregions)
return qs
@python_2_unicode_compatible
class PollRun(models.Model):
"""
Associates polls conducted on the same day.
VERY IMPORTANT: The RapidPro API also has something called a "Run", but
it does *not* correspond directly to this PollRun model. There is a RapidPro
Run record for every contact who responded to a given start of a flow,
while there's a single PollRun in TracPro for all the responses to one start
of a flow.
"""
TYPE_UNIVERSAL = 'u' # Sent to all active regions.
TYPE_SPOOFED = 's' # Universal PollRun created by baseline data spoof.
TYPE_REGIONAL = 'r' # Sent to only one region.
TYPE_PROPAGATED = 'p' # Sent to one region and its sub-regions.
TYPE_CHOICES = (
(TYPE_UNIVERSAL, _('Universal')),
(TYPE_SPOOFED, _('Spoofed')),
(TYPE_REGIONAL, _('Single Panel')),
(TYPE_PROPAGATED, _('Propagated to sub-children')),
)
pollrun_type = models.CharField(
max_length=1, editable=False, choices=TYPE_CHOICES)
poll = models.ForeignKey('polls.Poll', related_name='pollruns')
region = models.ForeignKey(
'groups.Region', blank=True, null=True,
verbose_name=_('panel'),
help_text=_("Panel where the poll was conducted."))
conducted_on = models.DateTimeField(
help_text=_("When the poll was conducted"), default=timezone.now)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, related_name="pollruns_created")
objects = PollRunManager()
def __str__(self):
return "{poll} ({when})".format(
poll=self.poll.name,
when=self.conducted_on.strftime(settings.SITE_DATE_FORMAT),
)
def as_json(self, region=None, include_subregions=True):
return {
'id': self.pk,
'poll': {
'id': self.poll.pk,
'name': self.poll.name,
},
'conducted_on': self.conducted_on,
'region': {'id': self.region.pk, 'name': self.region.name} if self.region else None,
'responses': self.get_response_counts(region, include_subregions),
}
def covers_region(self, region, include_subregions):
"""Return whether this PollRun is related to all given regions."""
if not region or region.pk == self.region_id:
# Shortcut to minimize more expensive queries later.
return True
if self.pollrun_type in (self.TYPE_UNIVERSAL, self.TYPE_SPOOFED):
return True
if self.pollrun_type == self.TYPE_REGIONAL:
if include_subregions:
return region in self.region.get_ancestors()
else: # pragma: nocover
return region == self.region
if self.pollrun_type == self.TYPE_PROPAGATED:
if include_subregions:
return region in self.region.get_family()
else:
return region in self.region.get_descendants()
def get_responses(self, region=None, include_subregions=True,
include_empty=True, include_inactive_responses=False):
"""
Return queryset of all PollRun responses for this region and sub-regions,
omitting the inactive ones (probably earlier versions of responses by the
same contact to the same poll) unless include_inactive_responses is True.
"""
if not self.covers_region(region, include_subregions):
raise ValueError(
"Request for responses in panel where poll wasn't conducted")
# Filter out inactive contacts
responses = self.responses.filter(
contact__region__is_active=True,
contact__is_active=True) # Filter out inactive contacts
if not include_inactive_responses:
# Filter out inactive responses (Multiple responses on same day from same contact)
responses = responses.filter(is_active=True)
if region:
if include_subregions:
regions = region.get_descendants(include_self=True)
responses = responses.filter(contact__region__in=regions)
else:
responses = responses.filter(contact__region=region)
if not include_empty:
responses = responses.exclude(status=Response.STATUS_EMPTY)
return responses.select_related('contact', 'contact__region').prefetch_related('contact__groups')
def get_response_counts(self, region=None, include_subregions=True, include_inactive_responses=False):
"""
Returns dictionary of PollRun response counts for this region and sub-regions.
key = str 'status' field from response
value = int count of responses with that status.
"""
status_counts = self.get_responses(
region, include_subregions, include_inactive_responses=include_inactive_responses)
status_counts = status_counts.values('status')
status_counts = status_counts.annotate(count=Count('status'))
results = {status[0]: 0 for status in Response.STATUS_CHOICES}
results.update({sc['status']: sc['count'] for sc in status_counts})
return results
def is_last_for_region(self, region):
"""Return whether this was the last PollRun conducted in the region.
Includes universal PollRuns.
"""
if not self.covers_region(region, include_subregions=False):
return False
newer_pollruns = PollRun.objects.filter(
poll=self.poll,
conducted_on__gt=self.conducted_on,
).by_region(region, include_subregions=False)
return not newer_pollruns.exists()
class ResponseQuerySet(models.QuerySet):
def active(self):
return self.filter(is_active=True)
def group_counts(self, *fields):
"""Group responses by the given fields then map to the count of matching responses."""
responses = self.order_by(*fields).values(*fields)
data = {}
for field_values, _responses in groupby(responses, itemgetter(*fields)):
data[field_values] = len(list(_responses))
return data
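    # A usage sketch (field names and counts are illustrative): grouping by one field
    # yields scalar keys, grouping by several yields tuple keys, e.g.
    #   Response.objects.group_counts('status')            -> {'C': 5, 'P': 2}
    #   Response.objects.group_counts('status', 'pollrun') -> {('C', 1): 3, ...}
    # because itemgetter(*fields) returns a scalar for one field and a tuple otherwise.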
class Response(models.Model):
"""Corresponds to RapidPro FlowRun."""
STATUS_EMPTY = 'E'
STATUS_PARTIAL = 'P'
STATUS_COMPLETE = 'C'
STATUS_CHOICES = (
(STATUS_EMPTY, _("Empty")),
(STATUS_PARTIAL, _("Partial")),
(STATUS_COMPLETE, _("Complete")),
)
flow_run_id = models.IntegerField(null=True)
pollrun = models.ForeignKey('polls.PollRun', null=True, related_name='responses')
contact = models.ForeignKey('contacts.Contact', related_name='responses')
created_on = models.DateTimeField(
help_text=_("When this response was created"))
updated_on = models.DateTimeField(
help_text=_("When the last activity on this response was"))
status = models.CharField(
max_length=1, verbose_name=_("Status"), choices=STATUS_CHOICES,
help_text=_("Current status of this response"))
is_active = models.BooleanField(
default=True,
help_text=_("Whether this response is active"))
objects = ResponseQuerySet.as_manager()
class Meta:
unique_together = [
('flow_run_id', 'pollrun'),
]
@classmethod
def create_empty(cls, org, pollrun, run):
"""
Creates an empty response from a run. Used to start or restart a
contact in an existing pollrun.
"""
contact = Contact.get_or_fetch(org, uuid=run.contact)
# de-activate any existing responses for this contact
pollrun.responses.filter(contact=contact).update(is_active=False)
return Response.objects.create(
flow_run_id=run.id, pollrun=pollrun, contact=contact,
created_on=run.created_on, updated_on=run.created_on,
status=Response.STATUS_EMPTY)
@classmethod
def from_run(cls, org, run, poll=None):
"""
Gets or creates a response from a flow run and returns the response.
        If the response is not up to date with the provided run, it is updated.
        If the run doesn't match an existing pollrun for the poll, it's assumed
        to be non-regional.
If a new response has been created, the returned response will have
attribute `is_new` = True.
:param run: temba Run instance
:param poll: tracpro Poll instance, or None
"""
if not poll:
poll = Poll.objects.active().by_org(org).get(flow_uuid=run.flow.uuid)
responses = Response.objects.filter(
flow_run_id=run.id,
pollrun__poll=poll,
contact__uuid=run.contact.uuid,
is_active=True,
)
response = responses.select_related('pollrun').first()
run_updated_on = cls.get_run_updated_on(run)
# if there is an up-to-date existing response for this run, return it
if response and response.updated_on == run_updated_on:
return response
try:
contact = Contact.get_or_fetch(poll.org, uuid=run.contact.uuid)
except NoContactInRapidProWarning as e:
# Callers expect an exception if we don't sync the response
raise ValueError("not syncing response because %s" % e.args[0], e)
except NoMatchingCohortsWarning:
# This happens regularly because tracpro users aren't necessarily interested
# in all contacts' responses. Just pass it on since we're not going to sync.
raise
# categorize completeness
if run.exit_type == u'completed':
status = Response.STATUS_COMPLETE
elif run.values:
status = Response.STATUS_PARTIAL
else:
status = Response.STATUS_EMPTY
if response:
# clear existing answers which will be replaced
response.answers.all().delete()
response.updated_on = run_updated_on
response.status = status
response.save(update_fields=('updated_on', 'status'))
else:
# if we don't have an existing response, then this poll started in
# RapidPro and is non-regional
pollrun = PollRun.objects.get_or_create_universal(
poll=poll,
for_date=run.created_on,
)
response, created = Response.objects.update_or_create(
flow_run_id=run.id,
pollrun=pollrun,
defaults=dict(
is_active=True,
contact=contact,
created_on=run.created_on,
updated_on=run_updated_on,
status=status
)
)
# If more than one for this contact and pollrun,
# set the last one created as the active one.
if Response.objects.filter(pollrun=pollrun, contact=contact).count() > 1:
# Set them all False, then the one we want true
with transaction.atomic():
Response.objects.filter(pollrun=pollrun, contact=contact).update(is_active=False)
last = Response.objects.filter(pollrun=pollrun, contact=contact).order_by('-created_on').first()
last.is_active = True
last.save()
response.is_new = created
# organize values by ruleset UUID
questions = poll.questions.active()
valuesets_by_ruleset = {value.node: value for key, value in run.values.iteritems()}
valuesets_by_question = {q: valuesets_by_ruleset.get(q.ruleset_uuid, None)
for q in questions}
# convert valuesets to answers
for question, valueset in valuesets_by_question.iteritems():
if valueset:
Answer.objects.create(
response=response,
question=question,
value=valueset.value,
category=valueset.category,
submitted_on=valueset.time,
)
return response
@classmethod
def get_run_updated_on(cls, run):
# find the result with the latest time
last_value_on = None
for key, value in run.values.iteritems():
if not last_value_on or value.time > last_value_on:
last_value_on = value.time
return last_value_on if last_value_on else run.created_on
class AnswerQuerySet(models.QuerySet):
def values_to_use(self):
return [a.value_to_use for a in self.select_related('response')]
def word_counts(self):
answers = [(answer.value_to_use, answer.response.contact.language) for answer in self]
words = [extract_words(*a) for a in answers]
counts = Counter(chain(*words))
return counts.most_common(50)
def group_values(self, *fields):
"""Group answers by the given fields then map to a list of matching values."""
answers = [
dict(
value_to_use=answer.value_to_use,
**{fieldname: dunder_to_chained_attrs(answer, fieldname) for fieldname in fields}
)
for answer in self.order_by(*fields).select_related('response', 'question__poll__org')
]
data = {}
for field_values, _answers in groupby(answers, itemgetter(*fields)):
data[field_values] = [a['value_to_use'] for a in _answers]
return data
def category_counts(self):
categories = self.values_list('category', flat=True)
counts = Counter(categories)
return counts.most_common()
def autocategorize(self):
"""
Break down numeric answers into categories automatically, based somewhat
on ranges where there are bunches of responses.
See http://numpy.readthedocs.io/en/stable/reference/generated/numpy.histogram.html
where we're using the 'sqrt' bin assignment algorithm.
        Silently ignores answers to non-numeric questions, and any whose value
        can't be successfully converted to a float.
Returns dictionary {
'categories': list of category names in order,
'data': list of counts in order
}
Category names are of the form "N.N-N.N".
"""
answers_to_numeric_questions = self.filter(question__question_type=Question.TYPE_NUMERIC)
answers = get_numeric_values(answers_to_numeric_questions.values_to_use())
if not answers:
return {
'categories': [],
'data': [],
}
hist, bin_edges = np.histogram(answers, bins='sqrt')
category_names = [
"%r-%r" % (round(bin_edges[i], 2), round(bin_edges[i+1], 2))
for i in range(len(hist))
]
data = list(hist)
return {
'categories': category_names,
'data': data,
}
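    # Rough sketch of the output shape (numbers invented): for values like
    # [1, 2, 2, 3, 10], numpy.histogram(..., bins='sqrt') gives three bins, so this
    # would return something like
    #   {'categories': ['1.0-4.0', '4.0-7.0', '7.0-10.0'], 'data': [4, 0, 1]}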
def category_counts_by_pollrun(self):
"""
Returns list of (categoryname, Counter) tuples, sorted by category name.
Each Counter breaks down the number of answers per poll run.
"""
counts = []
answers = self.order_by('category').values('category', 'response__pollrun')
for category, _answers in groupby(answers, itemgetter('category')):
pollrun_counts = Counter(a['response__pollrun'] for a in _answers)
counts.append((category, pollrun_counts))
# Order the data by the category name.
counts.sort(key=lambda (category, pollrun_counts): natural_sort_key(category))
return counts
class AnswerManager(models.Manager.from_queryset(AnswerQuerySet)):
def create(self, category, **kwargs):
# category can be a string or a multi-language dict
if isinstance(category, dict):
if 'base' in category:
category = category['base']
else:
category = category.itervalues().next()
if category == 'All Responses':
category = None
return super(AnswerManager, self).create(category=category, **kwargs)
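    # Normalization sketch (inputs invented):
    #   create(category='Yes', ...)                         -> stored category 'Yes'
    #   create(category={'base': 'Yes', 'fra': 'Oui'}, ...) -> stored category 'Yes'
    #   create(category='All Responses', ...)               -> stored category None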
class Answer(models.Model):
"""
Corresponds to RapidPro FlowStep.
In Temba API, corresponds to one entry in the Run.Value dictionary.
"""
response = models.ForeignKey('polls.Response', related_name='answers')
question = models.ForeignKey('polls.Question', related_name='answers')
value = models.CharField(
max_length=640, null=True,
help_text="Value from rapidpro")
last_value = models.CharField(
max_length=640, null=True,
help_text="For numeric questions, last answer from same contact on same day for same question. "
"Otherwise, same as value."
)
sum_value = models.CharField(
max_length=640, null=True,
help_text="For numeric questions, sum of answers from same contact on same "
"day for same question. Otherwise, same as value."
)
category = models.CharField(max_length=36, null=True)
submitted_on = models.DateTimeField(
help_text=_("When this answer was submitted"))
objects = AnswerManager()
def save(self, *args, **kwargs):
is_new = self.pk is None
super(Answer, self).save(*args, **kwargs)
if is_new:
# If there have been multiple answers by the same contact on the same
# day, we might want to show either the last answer or the sum of the
# numeric answers, depending on other things. Compute those in advance.
answers = self.same_question_contact_and_day().order_by('-submitted_on')
# This failed once in a test with an index out of range?! Could not reproduce.
last_value = answers[0].value
self.last_value = last_value
float_values = get_numeric_values([a.value for a in answers])
if len(float_values) > 0:
sum_value = str(sum(float_values))
self.sum_value = sum_value
else:
                sum_value = F('value')  # Just use each record's own value
self.sum_value = self.value
answers.update(last_value=last_value, sum_value=sum_value)
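        # A hedged example of what this precomputes (answers invented): if the same
        # contact answers "2" and later "3" to the same numeric question on one day,
        # each of those Answer rows ends up with last_value == "3" (latest
        # submitted_on) and sum_value == "5.0".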
@property
def org(self):
if not hasattr(self, 'cached_org'):
self.cached_org = self.question.poll.org
return self.cached_org
def same_question_contact_and_day(self):
return Answer.objects.filter(
question_id=self.question_id,
response__contact_id=self.response.contact_id,
submitted_on__gte=midnight(self.submitted_on),
submitted_on__lte=end_of_day(self.submitted_on),
)
@property
def value_to_use(self):
if self.should_use_sum():
return self.sum_value
elif self.should_use_last():
return self.last_value
else:
return self.value
def should_use_sum(self):
"""
Return true if we should use a sum of response values from
the same contact on the same day for the same question
for this answer.
"""
return (self.org.how_to_handle_sameday_responses == SAMEDAY_SUM and
self.question.question_type == Question.TYPE_NUMERIC)
def should_use_last(self):
"""
Return true if we should use the latest of the response values from
the same contact on the same day for the same question
for this answer.
"""
return (self.org.how_to_handle_sameday_responses == SAMEDAY_LAST and
self.question.question_type == Question.TYPE_NUMERIC)
|
|
from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer
from thinc.types import Ragged, Ints2d, Floats2d, Ints1d
import numpy
from ..compat import Protocol, runtime_checkable
from ..scorer import Scorer
from ..language import Language
from .trainable_pipe import TrainablePipe
from ..tokens import Doc, SpanGroup, Span
from ..vocab import Vocab
from ..training import Example, validate_examples
from ..errors import Errors
from ..util import registry
spancat_default_config = """
[model]
@architectures = "spacy.SpanCategorizer.v1"
scorer = {"@layers": "spacy.LinearLogistic.v1"}
[model.reducer]
@layers = spacy.mean_max_reducer.v1
hidden_size = 128
[model.tok2vec]
@architectures = "spacy.Tok2Vec.v1"
[model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v1"
width = 96
rows = [5000, 2000, 1000, 1000]
attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1"
width = ${model.tok2vec.embed.width}
window_size = 1
maxout_pieces = 3
depth = 4
"""
DEFAULT_SPANCAT_MODEL = Config().from_str(spancat_default_config)["model"]
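# A minimal usage sketch (not part of this module; the label is invented): the
# "spancat" factory registered below is normally added to a pipeline with e.g.
#   nlp.add_pipe("spancat", config={"spans_key": "sc"})
#   nlp.get_pipe("spancat").add_label("PERSON")
# after which predictions are stored in doc.spans["sc"], with per-span scores in
# doc.spans["sc"].attrs["scores"].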
@runtime_checkable
class Suggester(Protocol):
def __call__(self, docs: Iterable[Doc], *, ops: Optional[Ops] = None) -> Ragged:
...
@registry.misc("spacy.ngram_suggester.v1")
def build_ngram_suggester(sizes: List[int]) -> Suggester:
"""Suggest all spans of the given lengths. Spans are returned as a ragged
array of integers. The array has two columns, indicating the start and end
position."""
def ngram_suggester(docs: Iterable[Doc], *, ops: Optional[Ops] = None) -> Ragged:
if ops is None:
ops = get_current_ops()
spans = []
lengths = []
for doc in docs:
starts = ops.xp.arange(len(doc), dtype="i")
starts = starts.reshape((-1, 1))
length = 0
for size in sizes:
if size <= len(doc):
starts_size = starts[: len(doc) - (size - 1)]
spans.append(ops.xp.hstack((starts_size, starts_size + size)))
length += spans[-1].shape[0]
if spans:
assert spans[-1].ndim == 2, spans[-1].shape
lengths.append(length)
lengths_array = cast(Ints1d, ops.asarray(lengths, dtype="i"))
if len(spans) > 0:
output = Ragged(ops.xp.vstack(spans), lengths_array)
else:
output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array)
assert output.dataXd.ndim == 2
return output
return ngram_suggester
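# Worked example of the suggester output (shapes only, assuming one 3-token doc and
# sizes=[1, 2]): spans are stacked per size, so output.dataXd would be
#   [[0, 1], [1, 2], [2, 3], [0, 2], [1, 3]]   and output.lengths == [5]
# i.e. one lengths entry per doc, counting every span suggested for that doc.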
@registry.misc("spacy.ngram_range_suggester.v1")
def build_ngram_range_suggester(min_size: int, max_size: int) -> Suggester:
"""Suggest all spans of the given lengths between a given min and max value - both inclusive.
Spans are returned as a ragged array of integers. The array has two columns,
indicating the start and end position."""
sizes = list(range(min_size, max_size + 1))
return build_ngram_suggester(sizes)
@Language.factory(
"spancat",
assigns=["doc.spans"],
default_config={
"threshold": 0.5,
"spans_key": "sc",
"max_positive": None,
"model": DEFAULT_SPANCAT_MODEL,
"suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]},
"scorer": {"@scorers": "spacy.spancat_scorer.v1"},
},
default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0},
)
def make_spancat(
nlp: Language,
name: str,
suggester: Suggester,
model: Model[Tuple[List[Doc], Ragged], Floats2d],
spans_key: str,
scorer: Optional[Callable],
threshold: float,
max_positive: Optional[int],
) -> "SpanCategorizer":
"""Create a SpanCategorizer component. The span categorizer consists of two
parts: a suggester function that proposes candidate spans, and a labeller
model that predicts one or more labels for each span.
suggester (Callable[[Iterable[Doc], Optional[Ops]], Ragged]): A function that suggests spans.
Spans are returned as a ragged array with two integer columns, for the
start and end positions.
model (Model[Tuple[List[Doc], Ragged], Floats2d]): A model instance that
is given a list of documents and (start, end) indices representing
candidate span offsets. The model predicts a probability for each category
for each span.
spans_key (str): Key of the doc.spans dict to save the spans under. During
initialization and training, the component will look for spans on the
reference document under the same key.
threshold (float): Minimum probability to consider a prediction positive.
Spans with a positive prediction will be saved on the Doc. Defaults to
0.5.
max_positive (Optional[int]): Maximum number of labels to consider positive
per span. Defaults to None, indicating no limit.
"""
return SpanCategorizer(
nlp.vocab,
suggester=suggester,
model=model,
spans_key=spans_key,
threshold=threshold,
max_positive=max_positive,
name=name,
scorer=scorer,
)
def spancat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
kwargs = dict(kwargs)
attr_prefix = "spans_"
key = kwargs["spans_key"]
kwargs.setdefault("attr", f"{attr_prefix}{key}")
kwargs.setdefault("allow_overlap", True)
kwargs.setdefault(
"getter", lambda doc, key: doc.spans.get(key[len(attr_prefix) :], [])
)
kwargs.setdefault("has_annotation", lambda doc: key in doc.spans)
return Scorer.score_spans(examples, **kwargs)
@registry.scorers("spacy.spancat_scorer.v1")
def make_spancat_scorer():
return spancat_score
class SpanCategorizer(TrainablePipe):
"""Pipeline component to label spans of text.
DOCS: https://spacy.io/api/spancategorizer
"""
def __init__(
self,
vocab: Vocab,
model: Model[Tuple[List[Doc], Ragged], Floats2d],
suggester: Suggester,
name: str = "spancat",
*,
spans_key: str = "spans",
threshold: float = 0.5,
max_positive: Optional[int] = None,
scorer: Optional[Callable] = spancat_score,
) -> None:
"""Initialize the span categorizer.
vocab (Vocab): The shared vocabulary.
model (thinc.api.Model): The Thinc Model powering the pipeline component.
name (str): The component instance name, used to add entries to the
losses during training.
spans_key (str): Key of the Doc.spans dict to save the spans under.
During initialization and training, the component will look for
spans on the reference document under the same key. Defaults to
`"spans"`.
threshold (float): Minimum probability to consider a prediction
positive. Spans with a positive prediction will be saved on the Doc.
Defaults to 0.5.
max_positive (Optional[int]): Maximum number of labels to consider
positive per span. Defaults to None, indicating no limit.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_spans for the Doc.spans[spans_key] with overlapping
spans allowed.
DOCS: https://spacy.io/api/spancategorizer#init
"""
self.cfg = {
"labels": [],
"spans_key": spans_key,
"threshold": threshold,
"max_positive": max_positive,
}
self.vocab = vocab
self.suggester = suggester
self.model = model
self.name = name
self.scorer = scorer
@property
def key(self) -> str:
"""Key of the doc.spans dict to save the spans under. During
initialization and training, the component will look for spans on the
reference document under the same key.
"""
return str(self.cfg["spans_key"])
def add_label(self, label: str) -> int:
"""Add a new label to the pipe.
label (str): The label to add.
RETURNS (int): 0 if label is already present, otherwise 1.
DOCS: https://spacy.io/api/spancategorizer#add_label
"""
if not isinstance(label, str):
raise ValueError(Errors.E187)
if label in self.labels:
return 0
self._allow_extra_label()
self.cfg["labels"].append(label) # type: ignore
self.vocab.strings.add(label)
return 1
@property
def labels(self) -> Tuple[str]:
"""RETURNS (Tuple[str]): The labels currently added to the component.
DOCS: https://spacy.io/api/spancategorizer#labels
"""
return tuple(self.cfg["labels"]) # type: ignore
@property
def label_data(self) -> List[str]:
"""RETURNS (List[str]): Information about the component's labels.
DOCS: https://spacy.io/api/spancategorizer#label_data
"""
return list(self.labels)
def predict(self, docs: Iterable[Doc]):
"""Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict.
        RETURNS: The model's prediction for each document.
DOCS: https://spacy.io/api/spancategorizer#predict
"""
indices = self.suggester(docs, ops=self.model.ops)
scores = self.model.predict((docs, indices)) # type: ignore
return indices, scores
def set_annotations(self, docs: Iterable[Doc], indices_scores) -> None:
"""Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
scores: The scores to set, produced by SpanCategorizer.predict.
DOCS: https://spacy.io/api/spancategorizer#set_annotations
"""
labels = self.labels
indices, scores = indices_scores
offset = 0
for i, doc in enumerate(docs):
indices_i = indices[i].dataXd
doc.spans[self.key] = self._make_span_group(
doc, indices_i, scores[offset : offset + indices.lengths[i]], labels # type: ignore[arg-type]
)
offset += indices.lengths[i]
def update(
self,
examples: Iterable[Example],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None,
) -> Dict[str, float]:
"""Learn from a batch of documents and gold-standard information,
updating the pipe's model. Delegates to predict and get_loss.
examples (Iterable[Example]): A batch of Example objects.
drop (float): The dropout rate.
sgd (thinc.api.Optimizer): The optimizer.
losses (Dict[str, float]): Optional record of the loss during training.
Updated using the component name as the key.
RETURNS (Dict[str, float]): The updated losses dictionary.
DOCS: https://spacy.io/api/spancategorizer#update
"""
if losses is None:
losses = {}
losses.setdefault(self.name, 0.0)
validate_examples(examples, "SpanCategorizer.update")
self._validate_categories(examples)
if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples):
# Handle cases where there are no tokens in any docs.
return losses
docs = [eg.predicted for eg in examples]
spans = self.suggester(docs, ops=self.model.ops)
if spans.lengths.sum() == 0:
return losses
set_dropout_rate(self.model, drop)
scores, backprop_scores = self.model.begin_update((docs, spans))
loss, d_scores = self.get_loss(examples, (spans, scores))
backprop_scores(d_scores) # type: ignore
if sgd is not None:
self.finish_update(sgd)
losses[self.name] += loss
return losses
def get_loss(
self, examples: Iterable[Example], spans_scores: Tuple[Ragged, Floats2d]
) -> Tuple[float, float]:
"""Find the loss and gradient of loss for the batch of documents and
their predicted scores.
        examples (Iterable[Example]): The batch of examples.
spans_scores: Scores representing the model's predictions.
RETURNS (Tuple[float, float]): The loss and the gradient.
DOCS: https://spacy.io/api/spancategorizer#get_loss
"""
spans, scores = spans_scores
spans = Ragged(
self.model.ops.to_numpy(spans.data), self.model.ops.to_numpy(spans.lengths)
)
label_map = {label: i for i, label in enumerate(self.labels)}
target = numpy.zeros(scores.shape, dtype=scores.dtype)
offset = 0
for i, eg in enumerate(examples):
# Map (start, end) offset of spans to the row in the d_scores array,
# so that we can adjust the gradient for predictions that were
# in the gold standard.
spans_index = {}
spans_i = spans[i].dataXd
for j in range(spans.lengths[i]):
start = int(spans_i[j, 0]) # type: ignore
end = int(spans_i[j, 1]) # type: ignore
spans_index[(start, end)] = offset + j
for gold_span in self._get_aligned_spans(eg):
key = (gold_span.start, gold_span.end)
if key in spans_index:
row = spans_index[key]
k = label_map[gold_span.label_]
target[row, k] = 1.0
# The target is a flat array for all docs. Track the position
# we're at within the flat array.
offset += spans.lengths[i]
target = self.model.ops.asarray(target, dtype="f") # type: ignore
# The target will have the values 0 (for untrue predictions) or 1
# (for true predictions).
# The scores should be in the range [0, 1].
# If the prediction is 0.9 and it's true, the gradient
# will be -0.1 (0.9 - 1.0).
# If the prediction is 0.9 and it's false, the gradient will be
# 0.9 (0.9 - 0.0)
d_scores = scores - target
loss = float((d_scores**2).sum())
return loss, d_scores
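        # Tiny numeric illustration (values invented): for a single span and label,
        # a score of 0.9 with the gold label present gives target 1.0, so
        # d_scores == -0.1 and loss == 0.01; with no gold label, d_scores == 0.9.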
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
labels: Optional[List[str]] = None,
) -> None:
"""Initialize the pipe for training, using a representative set
of data examples.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Optional[Language]): The current nlp object the component is part of.
labels (Optional[List[str]]): The labels to add to the component, typically generated by the
`init labels` command. If no labels are provided, the get_examples
callback is used to extract the labels from the data.
DOCS: https://spacy.io/api/spancategorizer#initialize
"""
subbatch: List[Example] = []
if labels is not None:
for label in labels:
self.add_label(label)
for eg in get_examples():
if labels is None:
for span in eg.reference.spans.get(self.key, []):
self.add_label(span.label_)
if len(subbatch) < 10:
subbatch.append(eg)
self._require_labels()
if subbatch:
docs = [eg.x for eg in subbatch]
spans = build_ngram_suggester(sizes=[1])(docs)
Y = self.model.ops.alloc2f(spans.dataXd.shape[0], len(self.labels))
self.model.initialize(X=(docs, spans), Y=Y)
else:
self.model.initialize()
def _validate_categories(self, examples: Iterable[Example]):
# TODO
pass
def _get_aligned_spans(self, eg: Example):
return eg.get_aligned_spans_y2x(
eg.reference.spans.get(self.key, []), allow_overlap=True
)
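    # Sketch of the filtering performed below (scores invented): with threshold=0.5
    # and max_positive=1, a span with label scores [0.9, 0.7] keeps only its single
    # highest-scoring label, while [0.4, 0.3] keeps none and contributes no Span.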
def _make_span_group(
self, doc: Doc, indices: Ints2d, scores: Floats2d, labels: List[str]
) -> SpanGroup:
spans = SpanGroup(doc, name=self.key)
max_positive = self.cfg["max_positive"]
threshold = self.cfg["threshold"]
keeps = scores >= threshold
ranked = (scores * -1).argsort() # type: ignore
if max_positive is not None:
assert isinstance(max_positive, int)
span_filter = ranked[:, max_positive:]
for i, row in enumerate(span_filter):
keeps[i, row] = False
spans.attrs["scores"] = scores[keeps].flatten()
indices = self.model.ops.to_numpy(indices)
keeps = self.model.ops.to_numpy(keeps)
for i in range(indices.shape[0]):
start = indices[i, 0]
end = indices[i, 1]
for j, keep in enumerate(keeps[i]):
if keep:
spans.append(Span(doc, start, end, label=labels[j]))
return spans
|
|
from __future__ import unicode_literals
import copy
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test import TestCase
from django.utils._os import upath
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.views import (NamedUrlSessionWizardView,
NamedUrlCookieWizardView)
from django.contrib.formtools.tests.wizard.test_forms import get_request, Step1, Step2
from .forms import temp_storage
# On Python 2, __file__ may end with .pyc
THIS_FILE = upath(__file__).rstrip("c")
UPLOADED_FILE_NAME = 'tests.py'
class NamedWizardTests(object):
urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
# Get new step data, since we modify it during the tests.
self.wizard_step_data = copy.deepcopy(self.wizard_step_data)
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def tearDown(self):
# Ensure that there are no files in the storage which could lead to false
# results in the next tests. Deleting the whole storage dir is not really
# an option since the storage is defined on the module level and can't be
# easily reinitialized. (FIXME: The tests here should use the view classes
# directly instead of the test client, then the storage issues would go
# away too.)
for file in temp_storage.listdir('')[1]:
temp_storage.delete(file)
def test_initial_call(self):
response = self.client.get(reverse('%s_start' % self.wizard_urlname))
self.assertEqual(response.status_code, 302)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
wizard = response.context['wizard']
self.assertEqual(wizard['steps'].current, 'form1')
self.assertEqual(wizard['steps'].step0, 0)
self.assertEqual(wizard['steps'].step1, 1)
self.assertEqual(wizard['steps'].last, 'form4')
self.assertEqual(wizard['steps'].prev, None)
self.assertEqual(wizard['steps'].next, 'form2')
self.assertEqual(wizard['steps'].count, 4)
self.assertEqual(wizard['url_name'], self.wizard_urlname)
def test_initial_call_with_params(self):
get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'}
response = self.client.get(reverse('%s_start' % self.wizard_urlname),
get_params)
self.assertEqual(response.status_code, 302)
# Test for proper redirect GET parameters
location = response.url
self.assertNotEqual(location.find('?'), -1)
querydict = QueryDict(location[location.find('?') + 1:])
self.assertEqual(dict(querydict.items()), get_params)
def test_form_post_error(self):
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_1_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context['wizard']['form'].errors,
{'name': ['This field is required.'],
'user': ['This field is required.']})
def test_form_post_success(self):
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
wizard = response.context['wizard']
self.assertEqual(wizard['steps'].current, 'form2')
self.assertEqual(wizard['steps'].step0, 1)
self.assertEqual(wizard['steps'].prev, 'form1')
self.assertEqual(wizard['steps'].next, 'form3')
def test_form_stepback(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(
reverse(self.wizard_urlname, kwargs={
'step': response.context['wizard']['steps'].current
}), {'wizard_goto_step': response.context['wizard']['steps'].prev})
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_jump(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form3'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
def test_form_finish(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
with open(THIS_FILE, 'rb') as post_file:
post_data['form2-file1'] = post_file
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
post_data)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
# Check that the file got uploaded properly.
with open(THIS_FILE, 'rb') as f, temp_storage.open(UPLOADED_FILE_NAME) as f2:
self.assertEqual(f.read(), f2.read())
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[2])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[3])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
# After the wizard is done no files should exist anymore.
self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME))
all_data = response.context['form_list']
del all_data[1]['file1']
self.assertEqual(all_data, [
{'name': 'Pony', 'thirsty': True, 'user': self.testuser},
{'address1': '123 Main St', 'address2': 'Djangoland'},
{'random_crap': 'blah blah'},
[{'random_crap': 'blah blah'}, {'random_crap': 'blah blah'}]])
def test_cleaned_data(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
with open(THIS_FILE, 'rb') as post_file:
post_data['form2-file1'] = post_file
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
post_data)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertTrue(temp_storage.exists(UPLOADED_FILE_NAME))
step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'})
response = self.client.get(step2_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
with open(THIS_FILE, 'rb') as f, temp_storage.open(UPLOADED_FILE_NAME) as f2:
self.assertEqual(f.read(), f2.read())
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[2])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[3])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
all_data = response.context['all_cleaned_data']
self.assertEqual(all_data['file1'].name, UPLOADED_FILE_NAME)
self.assertTrue(all_data['file1'].closed)
self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME))
del all_data['file1']
self.assertEqual(
all_data,
{'name': 'Pony', 'thirsty': True, 'user': self.testuser,
'address1': '123 Main St', 'address2': 'Djangoland',
'random_crap': 'blah blah', 'formset-form4': [
{'random_crap': 'blah blah'},
{'random_crap': 'blah blah'}
]})
form_dict = response.context['form_dict']
self.assertIn('form1', form_dict.keys())
self.assertIn('form2', form_dict.keys())
self.assertEqual(form_dict['form1'].cleaned_data, response.context['form_list'][0])
def test_manipulated_data(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
with open(THIS_FILE, 'rb') as post_file:
post_data['form2-file1'] = post_file
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
post_data)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[2])
loc = response.url
response = self.client.get(loc)
self.assertEqual(response.status_code, 200, loc)
self.client.cookies.pop('sessionid', None)
self.client.cookies.pop('wizard_cookie_contact_wizard', None)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_reset(self):
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.get(
'%s?reset=1' % reverse('%s_start' % self.wizard_urlname))
self.assertEqual(response.status_code, 302)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
@skipIfCustomUser
class NamedSessionWizardTests(NamedWizardTests, TestCase):
wizard_urlname = 'nwiz_session'
wizard_step_1_data = {
'session_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'session_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'session_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form4',
}
)
@skipIfCustomUser
class NamedCookieWizardTests(NamedWizardTests, TestCase):
wizard_urlname = 'nwiz_cookie'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
class NamedFormTests(object):
urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'
def test_revalidation(self):
request = get_request()
testform = self.formwizard_class.as_view(
[('start', Step1), ('step2', Step2)],
url_name=self.wizard_urlname)
response, instance = testform(request, step='done')
instance.render_done(None)
self.assertEqual(instance.storage.current_step, 'start')
class TestNamedUrlSessionWizardView(NamedUrlSessionWizardView):
def dispatch(self, request, *args, **kwargs):
response = super(TestNamedUrlSessionWizardView, self).dispatch(request, *args, **kwargs)
return response, self
class TestNamedUrlCookieWizardView(NamedUrlCookieWizardView):
def dispatch(self, request, *args, **kwargs):
response = super(TestNamedUrlCookieWizardView, self).dispatch(request, *args, **kwargs)
return response, self
@skipIfCustomUser
class NamedSessionFormTests(NamedFormTests, TestCase):
formwizard_class = TestNamedUrlSessionWizardView
wizard_urlname = 'nwiz_session'
@skipIfCustomUser
class NamedCookieFormTests(NamedFormTests, TestCase):
formwizard_class = TestNamedUrlCookieWizardView
wizard_urlname = 'nwiz_cookie'
|
|
from datetime import date, datetime
import getpass
import json
import logging
from os.path import join
import re
import shutil
import subprocess
import tempfile
import uuid
from urllib.parse import urljoin
from xml.etree import ElementTree
import argh
from argh.interaction import safe_input
import arghlog
import requests
from requests.auth import HTTPBasicAuth
__version__ = '1.0'
SRC_ATTR_RE = re.compile(r"""
(?P<src> src \s*=\s* )
(?P<quote> ["'] )
(?P<url>.*?)
(?=(?P=quote))
""", re.IGNORECASE | re.VERBOSE | re.MULTILINE | re.DOTALL)
CONTENTS_NCX_XML = """
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" xml:lang="en">
<head>
<meta name="dtb:uid"/>
<meta content="3" name="dtb:depth"/>
<meta content="0" name="dtb:totalPageCount"/>
<meta content="0" name="dtb:maxPageNumber"/>
</head>
<docTitle>
<text></text>
</docTitle>
<navMap></navMap>
</ncx>
"""
CONTENT_OPF_XML = """
<package xmlns="http://www.idpf.org/2007/opf" version="2.0" unique-identifier="uid">
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
<dc-metadata>
<dc:title></dc:title>
<dc:language>en</dc:language>
<dc:identifier id="uid"></dc:identifier>
<dc:creator>pinboard-zine</dc:creator>
<dc:source>pinboard-zine</dc:source>
<dc:date opf:event="publication"></dc:date>
<!-- meta name="EmbeddedCover" content="images/image00002.jpeg"/ -->
</dc-metadata>
<x-metadata>
<output content-type="application/x-mobipocket-subscription-magazine" encoding="utf-8"/>
</x-metadata>
</metadata>
<manifest>
<item href="contents.ncx" id="ncx" media-type="application/x-dtbncx+xml"/>
<item href="contents.html" media-type="application/xhtml+xml" id="contents"/>
</manifest>
<spine toc="ncx"></spine>
<tours></tours>
<guide>
<reference title="Beginning" type="start" href="contents.html"/>
</guide>
</package>
"""
ARTICLE_HTML = """
<html><head>
<meta charset="utf-8"/>
<title></title>
</head><body>
<h3 id="top"></h3>
<h4><a href=""></a></h4>
<hr/>
</body></html>
"""
def contents_ncx_for_articles(articles, uid, title):
root = ElementTree.fromstring(CONTENTS_NCX_XML)
# Add head/meta name=dtb:uid
uid_node = root.find("./{http://www.daisy.org/z3986/2005/ncx/}head/{http://www.daisy.org/z3986/2005/ncx/}meta[@name='dtb:uid']")
uid_node.attrib['content'] = uid
# Add docTitle/text
title_node = root.find("./{http://www.daisy.org/z3986/2005/ncx/}docTitle/{http://www.daisy.org/z3986/2005/ncx/}text")
title_node.text = title
    # Add navMap/navPoint elements.
navmap_node = root.find("./{http://www.daisy.org/z3986/2005/ncx/}navMap")
def nav_point(parent, order, title, src, kind):
point = ElementTree.SubElement(parent, '{http://www.daisy.org/z3986/2005/ncx/}navPoint', {
'id': 'nav-{}'.format(order),
'playOrder': str(order),
'class': kind,
})
label = ElementTree.SubElement(point, '{http://www.daisy.org/z3986/2005/ncx/}navLabel')
label_text = ElementTree.SubElement(label, '{http://www.daisy.org/z3986/2005/ncx/}text')
label_text.text = title
content = ElementTree.SubElement(point, '{http://www.daisy.org/z3986/2005/ncx/}content', {
'src': src
})
return point
first_article = articles[0]
toc_point = nav_point(navmap_node, 1, 'Table of Contents', first_article['filename'], 'periodical')
section_point = nav_point(toc_point, 2, 'Unread', first_article['filename'] + '#top', 'section')
for order, article in enumerate(articles, 3):
filename = article['filename']
if order == 3: # first article
filename += '#top'
article_point = nav_point(section_point, order, article['title'], filename, 'article')
if article['description']:
ElementTree.SubElement(article_point, '{http://mobipocket.com/ns/mbp}meta', {
'name': 'description',
}).text = article['description']
if article['author']:
ElementTree.SubElement(article_point, '{http://mobipocket.com/ns/mbp}meta', {
'name': 'author',
}).text = article['author']
ElementTree.register_namespace('', 'http://www.daisy.org/z3986/2005/ncx/')
ElementTree.register_namespace('mbp', 'http://mobipocket.com/ns/mbp')
return ElementTree.tostring(root, encoding='unicode')
def content_opf_for_articles(articles, uid, title):
root = ElementTree.fromstring(CONTENT_OPF_XML)
title_node = root.find("./{http://www.idpf.org/2007/opf}metadata//{http://purl.org/dc/elements/1.1/}title")
title_node.text = title
uid_node = root.find("./{http://www.idpf.org/2007/opf}metadata//{http://purl.org/dc/elements/1.1/}identifier[@id='uid']")
uid_node.text = uid
date_node = root.find("./{http://www.idpf.org/2007/opf}metadata//{http://purl.org/dc/elements/1.1/}date")
date_node.text = datetime.utcnow().isoformat()
manifest_node = root.find("./{http://www.idpf.org/2007/opf}manifest")
spine_node = root.find("./{http://www.idpf.org/2007/opf}spine")
guide_node = root.find("./{http://www.idpf.org/2007/opf}guide")
# Let's track image IDs. If we don't, and multiple articles are from the
# same site and embed the same image, we'll make duplicate opf items for
# them, which is fatal to kindlegen.
seen_image_ids = set()
for article in articles:
ElementTree.SubElement(guide_node, '{http://www.idpf.org/2007/opf}reference', {
'title': article['title'],
'href': article['filename'],
'type': 'text',
})
ElementTree.SubElement(manifest_node, '{http://www.idpf.org/2007/opf}item', {
'href': article['filename'],
# Cheat by using the filename as the id too. The whole thing! Right in there!
'id': article['filename'],
'media-type': 'application/xhtml+xml',
})
ElementTree.SubElement(spine_node, '{http://www.idpf.org/2007/opf}itemref', {
'idref': article['filename'],
})
for image in article.get('images', ()):
if image['filename'] in seen_image_ids:
continue
seen_image_ids.add(image['filename'])
ElementTree.SubElement(manifest_node, '{http://www.idpf.org/2007/opf}item', {
'href': image['filename'],
'id': image['filename'],
'media-type': image['type'],
})
ElementTree.register_namespace('', 'http://www.idpf.org/2007/opf')
ElementTree.register_namespace('dc', 'http://purl.org/dc/elements/1.1/')
return ElementTree.tostring(root, encoding='unicode')
def contents_html_for_articles(articles, uid, title):
HTML = """
<html><head>
<meta charset="utf-8">
<title>{title}</title>
</head><body>
<h1>Table of Contents</h1>
<ul>
{items}
</ul>
</body></html>
"""
ITEM = """
<li><a href="{filename}">{title}</a> {description}</li>
"""
items = ''.join(ITEM.format(**article) for article in articles)
html = HTML.format(title=title, items=items)
return html
def html_for_readable_article(article, readable, content):
root = ElementTree.fromstring(ARTICLE_HTML.strip())
title_node = root.find('./head/title')
title_node.text = article['title']
head_node = root.find('./head')
if article['author']:
ElementTree.SubElement(head_node, 'meta', {
'name': 'author',
'content': article['author'],
})
if article['description']:
ElementTree.SubElement(head_node, 'meta', {
'name': 'description',
'content': article['description'],
})
title_node = root.find('./body/h3')
title_node.text = article['title']
link_node = root.find('./body/h4/a')
link_node.attrib['href'] = article['u']
link_node.text = readable['domain']
if article['author']:
link_node.tail = ' by ' + article['author']
html = ElementTree.tostring(root, encoding='unicode')
html = html[:-len('</body></html>')]
html = ''.join(('<?xml version="1.0" encoding="utf-8"?>\n', html, content, '</body></html>'))
return html
@argh.arg('--skip', default=None, action='append')
@argh.arg('--tag', default=None, action='append')
@argh.arg('--skip-tag', default=None, action='append')
def zine(username: 'Pinboard username to find articles for',
outputfile: 'filename for the output mobi file',
items: 'number of items to put in the zine' =20,
readability_token: 'Readability Parser API token to use to parse articles' =None,
skip: 'URLs of articles not to include' =None,
tag: 'tags articles must have to be included' =None,
skip_tag: 'tags of articles not to include' =None):
req = requests.Session()
req.headers.update({'user-agent': 'pinboard-zine/{}'.format(__version__)})
# What pinboard account do I use?
try:
password = getpass.getpass('Pinboard password for {}: '.format(username))
except KeyboardInterrupt:
return
pinboard_auth = HTTPBasicAuth(username, password)
res = requests.get('https://api.pinboard.in/v1/user/secret?format=json', auth=pinboard_auth, verify=True)
if res.status_code == 401:
raise argh.CommandError("Could not connect to Pinboard with that username. Is your password correct?")
res.raise_for_status()
data = res.json()
secret = data['result']
# We want the oldest, so ask for as many posts as possible.
# We *could* ask to filter by a tag here, but only one, and not to exclude,
# so don't bother.
feed_url = 'https://feeds.pinboard.in/json/secret:{}/u:{}/toread/?count=400'.format(secret, username)
res = req.get(feed_url, verify=True)
# The secret should be correct, so don't try to handle an auth error.
res.raise_for_status()
data = res.json()
articles = reversed(data)
# Start making a new zine (tmpdir).
zinedir = tempfile.mkdtemp()
logging.debug("Writing mobi files to %s", zinedir)
if readability_token is None:
try:
readability_token = safe_input('Readability Parser API token: ')
except KeyboardInterrupt:
return
# For each of however many unread items:
saved = list()
skip = set(skip) if skip is not None else set()
skip_tags = set(skip_tag) if skip_tag is not None else set()
include_tags = set(tag) if tag is not None else None
for article in articles:
# Fetch the resource.
url = article['u']
if url in skip:
logging.info("Skipping article '%s' with URL %s as requested", article['d'], article['u'])
continue
article_tags = set(t for t in article['t'] if t)
if skip_tags & article_tags:
some_tag = (skip_tags & article_tags).pop()
logging.info("Skipping article '%s' with tag '%s' as requested", article['d'], some_tag)
continue
if include_tags:
if not include_tags & article_tags:
continue
some_tag = (include_tags & article_tags).pop()
logging.info("Including article '%s' with tag '%s' as requested", article['d'], some_tag)
params = {
'url': url,
'token': readability_token,
}
try:
res = req.get('https://readability.com/api/content/v1/parser', params=params, timeout=10)
res.raise_for_status()
except requests.exceptions.Timeout:
logging.warning("Request for article '%s' timed out, skipping", article['d'])
continue
except Exception as exc:
logging.exception("Couldn't request article '%s', skipping", article['d'], exc_info=exc)
continue
readable = res.json()
article['title'] = readable['title'] or article['d']
if not article['title']:
article['title'] = '{} article'.format(readable['domain'])
article['description'] = article['n'] or readable['dek'] or readable['excerpt']
article['author'] = readable['author']
content = readable['content']
downloaded_images = set()
def download_image(match):
img_url = urljoin(url, match.group('url'))
img_filename = re.sub(r'[\W_]+', '-', img_url)
if img_url not in downloaded_images:
downloaded_images.add(img_url)
res = req.get(img_url)
try:
res.raise_for_status()
except Exception as exc:
logging.debug("Got error downloading referenced image %s, not changing img: %s", img_url, str(exc))
return match.group(0)
content_type = res.headers['content-type']
if content_type in ('image/jpg', 'image/jpeg'):
img_filename += '.jpeg'
elif content_type == 'image/gif':
img_filename += '.gif'
elif content_type == 'image/png':
img_filename += '.png'
elif content_type.startswith('image/'):
                    # Some unknown image type. Use the bare filename without an extension.
pass
else:
logging.warning("Saved image %s with unknown content type %s", img_url, content_type)
with open(join(zinedir, img_filename), 'wb') as f:
f.write(res.content)
images = article.setdefault('images', list())
images.append({
'filename': img_filename,
'type': content_type,
})
return ''.join((match.group('src'), match.group('quote'), img_filename))
content = SRC_ATTR_RE.sub(download_image, readable['content'])
read_html = html_for_readable_article(article, readable, content)
# Write it to the zine directory.
filename = article['filename'] = re.sub(r'[\W_]+', '-', url) + '.html'
with open(join(zinedir, filename), 'w') as f:
f.write(read_html)
logging.debug("Saved article '%s'", article['title'])
saved.append(article)
if len(saved) >= items:
break
# Write the metadata files to the zine directory.
uid = uuid.uuid4().hex
title = "Pinboard Unread"
ncx_xml = contents_ncx_for_articles(saved, uid, title)
opf_xml = content_opf_for_articles(saved, uid, title)
toc_html = contents_html_for_articles(saved, uid, title)
with open(join(zinedir, 'contents.ncx'), 'w') as f:
f.write(ncx_xml)
content_opf_filename = join(zinedir, 'content.opf')
with open(content_opf_filename, 'w') as f:
f.write(opf_xml)
with open(join(zinedir, 'contents.html'), 'w') as f:
f.write(toc_html)
logging.debug("Wrote all the files to %s, running kindlegen", zinedir)
# Run kindlegen to mobify the zine.
try:
output = subprocess.check_output(['kindlegen', content_opf_filename, '-o', 'pinboardzine.mobi'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
output = exc.output
# Sadly warnings are expected.
if b'Mobi file built with WARNINGS' not in output:
log_filename = outputfile + '.log'
# CalledProcessError output is a bytes, so write out bytes.
with open(log_filename, 'wb') as f:
f.write(output)
raise argh.CommandError("The zine file could not be built correctly. See kindlegen output at {} for errors.".format(log_filename))
shutil.copyfile(join(zinedir, 'pinboardzine.mobi'), outputfile)
logging.debug("Wrote Kindle mobi to %s", outputfile)
# Everything went smoothly! Remove the zine dir.
shutil.rmtree(zinedir)
logging.debug("Cleaned up source directory")
def main():
parser = argh.ArghParser()
arghlog.add_logging(parser)
parser.set_default_command(zine)
logging.getLogger('requests').propagate = False
parser.dispatch()
if __name__ == '__main__':
main()
|
|
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Author(s):
Norton Luo
This test validates the system-level function of the RackHD image service. It covers a VMware ESXi install and node
rediscovery. It uses the image server to store the ESXi image and the microkernel used for RackHD discovery.
You need to put a config file in the /config directory.
'''
import os
import sys
import time
import flogging
import random
import fit_common
import urllib2
import pexpect
import unittest
import test_api_utils
from nose.plugins.attrib import attr
logs = flogging.get_loggers()
@attr(all=False, regression=False, smoke=False, imageservice=False)
class test_image_service_system(fit_common.unittest.TestCase):
def _get_serverip(self):
args = fit_common.fitargs()['unhandled_arguments']
for arg in args:
if "imageserver" in arg:
serverip = arg.split("=")[1]
return serverip
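        # For example (address invented): passing "imageserver=10.240.1.5" on the
        # command line makes this method return "10.240.1.5".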
def _apply_obmsetting_to_node(self, nodeid):
usr = ''
pwd = ''
response = fit_common.rackhdapi(
'/api/2.0/nodes/' + nodeid + '/catalogs/bmc')
if response['status'] in range(200, 205):
bmcip = response['json']['data']['IP Address']
else:
bmcip = "0.0.0.0"
if bmcip == "0.0.0.0":
response = fit_common.rackhdapi(
'/api/2.0/nodes/' + nodeid + '/catalogs/rmm')
if response['status'] in range(200, 205):
bmcip = response['json']['data']['IP Address']
else:
return False
# Try credential record in config file
for creds in fit_common.fitcreds()['bmc']:
if fit_common.remote_shell(
'ipmitool -I lanplus -H ' + bmcip + ' -U ' +
creds['username'] + ' -P ' + creds['password'] + ' fru')['exitcode'] == 0:
usr = creds['username']
pwd = creds['password']
break
# Put the credential to OBM settings
if usr != "":
payload = {
"service": "ipmi-obm-service",
"config": {
"host": bmcip,
"user": usr,
"password": pwd},
"nodeId": nodeid}
api_data = fit_common.rackhdapi("/api/2.0/obms", action='put', payload=payload)
if api_data['status'] == 201:
return True
return False
def _upload_os_by_network(self, osname, osversion, source_url):
mon_url = '/images?name=' + osname + '&version=' + osversion + '&isoweb=' + source_url
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful(
"http://" +
serverip +
":" + control_port +
mon_url,
rest_action="put",
rest_payload={},
rest_timeout=None,
rest_headers={})
if response['status'] in range(200, 205):
return response['json']
else:
            logs.error('Incorrect HTTP return code, expected 201-205, got:' + str(response['status']))
return "fail"
def _list_file(self, mon_url):
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url)
if response['status'] in range(200, 205):
return response['json']
else:
logs.error('Incorrect HTTP return code, expected 201-205, got:' + str(response['status']))
return "fail"
def _delete_os_image(self, osname, osversion):
mon_url = '/images?name=' + osname + '&version=' + osversion
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="delete")
if response['status'] in range(200, 205):
return response['json']
else:
logs.error('Incorrect HTTP return code, expected 201-205, got:' + str(response['status']))
return "fail"
def _delete_os_iso(self, isoname):
mon_url = '/iso?name=' + isoname
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="delete")
if response['status'] in range(200, 205):
return response['json']
else:
logs.error('Incorrect HTTP return code, expected 201-205, got:' + str(response['status']))
return "fail"
def _wait_for_task_complete(self, taskid, retries=60):
for dummy in range(0, retries):
result = fit_common.rackhdapi('/api/2.0/workflows/' + taskid)
if result['json']["status"] == 'running' or result['json']["status"] == 'pending':
logs.debug("OS Install workflow state: {}".format(result['json']["status"]))
fit_common.time.sleep(30)
elif result['json']["status"] == 'succeeded':
logs.debug("OS Install workflow state: {}".format(result['json']["status"]))
return True
else:
break
logs.error("Task failed with the following state: " + result['json']["status"])
return False
def _get_tester_ip(self):
serverip = self._get_serverip()
monip = fit_common.fitcfg()["rackhd-config"]["apiServerAddress"]
cmd = "ping -R -c 1 " + monip + ""
(command_output, exitstatus) = pexpect.run(
"ssh -q -o StrictHostKeyChecking=no -t " + fit_common.fitcfg()["image_service"]['usr'] + "@" + serverip +
" sudo bash -c \\\"" + cmd + "\\\"", withexitstatus=1,
events={"assword": fit_common.fitcfg()["image_service"]['pwd'] + "\n"}, timeout=300)
uud = command_output.split("\t")
myip = uud[1].split("\r\n")[0]
logs.debug('My IP address is: ' + myip)
return myip
def _create_esxi_repo(self):
logs.debug("create a ESXi repo")
for osrepo in fit_common.fitcfg()["image_service"]["os_image"]:
if osrepo["osname"] == "ESXi" and osrepo["version"] == "6.0":
os_name = osrepo["osname"]
os_version = osrepo["version"]
http_iso_url = osrepo["url"]
self._upload_os_by_network(os_name, os_version, http_iso_url)
logs.debug("create ESXi repo successfully")
return
logs.error("No ESXi source found in config")
def _upload_microkernel(self, filename):
myfile = open(filename, 'rb')
serverip = self._get_serverip()
mon_url = '/microkernel?name=' + filename
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="binary-put",
rest_payload=myfile)
if response['status'] in range(200, 205):
return response['json']
else:
            logs.debug_3('Incorrect HTTP return code, expected 200-204, got: ' + str(response['status']))
return "fail"
def _delete_microkernel(self, filename):
mon_url = '/microkernel?name=' + filename
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="delete")
if response['status'] in range(200, 205):
return response['json']
else:
            logs.debug_3('Incorrect HTTP return code, expected 200-204, got: ' + str(response['status']))
return "fail"
def _scp_file(self, url):
file_name = url.split('/')[-1]
logs.debug_3("scp file %s from RackHD" % url)
if not os.path.exists(file_name):
path = url[6:]
rackhd_hostname = fit_common.fitargs()['rackhd_host']
scp_file = fit_common.fitcreds()['rackhd_host'][0]['username'] + '@{0}:{1}'.format(rackhd_hostname, path)
cmd = 'scp -o StrictHostKeyChecking=no {0} .'.format(scp_file)
logs.debug_3("scp command : '{0}'".format(cmd))
logfile_redirect = None
if fit_common.VERBOSITY >= 9:
logfile_redirect = sys.stdout
(command_output, ecode) = pexpect.run(
cmd, withexitstatus=1,
events={'(?i)assword: ': fit_common.fitcreds()['rackhd_host'][0]['password'] + '\n'},
logfile=logfile_redirect)
assert ecode == 0, 'failed "{0}" because {1}. Output={2}'.format(cmd, ecode, command_output)
return file_name
def _download_file(self, url):
logs.debug_3("downloading url=%s" % url)
file_name = url.split('/')[-1]
if os.path.exists(file_name) is False:
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
logs.debug_3("Downloading: %s Bytes: %s" % (file_name, file_size))
file_size_dl = 0
block_sz = 8192
while True:
file_buffer = u.read(block_sz)
if not file_buffer:
break
file_size_dl += len(file_buffer)
f.write(file_buffer)
                # logs does not support redrawing the progress counter in place, so use print instead.
if fit_common.VERBOSITY >= 9:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8) * (len(status) + 1) + "\r"
print(status)
f.close()
return file_name
def _upload_all_microkernels(self):
for microkernelrepo in fit_common.fitcfg()["image_service"]["microkernel"]:
if microkernelrepo[:3] == "scp":
file_name = self._scp_file(microkernelrepo)
else:
file_name = self._download_file(microkernelrepo)
self._upload_microkernel(file_name)
self._release(file_name)
def _release(self, file_name):
try:
logs.debug_3("rm " + file_name)
os.system("rm " + file_name)
return True
except OSError:
return False
def _delete_all_microkernels(self):
microkernel_list = self._list_file('/microkernel')
for microkernel in microkernel_list:
self.assertNotEqual(self._delete_microkernel(microkernel["name"]), "fail", "delete image failed!")
microkernel_list_clear = self._list_file('/microkernel')
self.assertTrue(microkernel_list_clear == [])
logs.debug_3("All microkernels are cleared!")
def _delete_all_images(self):
os_image_list = self._list_os_image()
serverip = self._get_serverip()
for image_repo in os_image_list:
self.assertNotEqual(
self._delete_os_image(image_repo["name"], image_repo["version"]), "fail", "delete image failed!")
file_port = str(fit_common.fitcfg()["image_service"]["file_port"])
fileurlprefix = "http://" + serverip + ":" + file_port + "/" + image_repo["name"] + '/' + \
image_repo["version"] + '/'
            self.assertFalse(self._file_exists(fileurlprefix), "The repo url was not deleted completely")
os_image_list_clear = self._list_os_image()
self.assertTrue(os_image_list_clear == [])
os_iso_list = self._list_os_iso()
for iso_repo in os_iso_list:
self.assertNotEqual(self._delete_os_iso(iso_repo["name"]), "fail", "delete iso failed!")
os_iso_list_clear = self._list_os_iso()
        self.assertTrue(os_iso_list_clear == [], "The iso was not deleted completely")
logs.debug("All repo is cleared!")
def _wait_for_discover(self, node_uuid):
for dummy in range(0, 30):
fit_common.time.sleep(30)
rest_data = fit_common.rackhdapi('/redfish/v1/Systems/')
if rest_data['json']['Members@odata.count'] == 0:
continue
node_collection = rest_data['json']['Members']
for computenode in node_collection:
nodeidurl = computenode['@odata.id']
api_data = fit_common.rackhdapi(nodeidurl)
if api_data['status'] > 399:
break
if node_uuid == api_data['json']['UUID']:
return True
logs.error("Timeout in rediscovery!")
return False
def test_bootstrapping_ext_esxi6(self):
self._create_esxi_repo()
node_collection = test_api_utils.get_node_list_by_type("compute")
fileserver_ip = self._get_tester_ip()
file_port = str(fit_common.fitcfg()["image_service"]["file_port"])
repourl = "http://" + fileserver_ip + ':' + file_port + '/ESXi' + '/' + '6.0' + '/'
# Select one node at random
for dummy in node_collection:
node = node_collection[random.randint(0, len(node_collection) - 1)]
logs.debug('Running ESXI 6.0 bootstrap from external file server.')
node_obm = fit_common.rackhdapi(
'/api/2.0/nodes/' + node)['json']['obms']
if node_obm == []:
self.assertTrue(self._apply_obmsetting_to_node(node), "Fail to apply obm setting!")
fit_common.rackhdapi(
'/api/2.0/nodes/' + node + '/workflows/action', action='put',
payload={
"command": "cancel",
"options": {}
})
nodehostname = 'esxi60'
payload_data = {"options": {
"defaults": {
"version": "6.0",
"repo": repourl,
"rootPassword": "1234567",
"hostname": nodehostname
}}}
result = fit_common.rackhdapi(
'/api/2.0/nodes/' + node + '/workflows?name=Graph.InstallEsxi', action='post', payload=payload_data)
self.assertEqual(
result['status'], 201, 'Was expecting code 201. Got ' + str(result['status']))
self.assertEqual(
self._wait_for_task_complete(result['json']["instanceId"], retries=80), True,
'TaskID ' + result['json']["instanceId"] + ' not successfully completed.')
self._delete_all_images()
def test_rediscover(self):
# Select one node at random that's not a management server
self._upload_all_microkernels()
node_collection = test_api_utils.get_node_list_by_type("compute")
for dummy in node_collection:
node = node_collection[random.randint(0, len(node_collection) - 1)]
if fit_common.rackhdapi('/api/2.0/nodes/' + node)['json']['name'] != "Management Server":
break
logs.debug_3('Checking OBM setting...')
node_obm = fit_common.rackhdapi('/api/2.0/nodes/' + node)['json']['obms']
if node_obm == []:
self.assertTrue(self._apply_obmsetting_to_node(node), "Fail to apply obm setting!")
node_uuid = fit_common.rackhdapi('/redfish/v1/Systems/' + node)['json']['UUID']
logs.debug_3('UUID of selected Node is:' + node_uuid)
# Cancel all active workflow on target node
fit_common.rackhdapi(
'/api/2.0/nodes/' + node + '/workflows/action', action='put',
payload={
"command": "cancel",
"options": {}
})
logs.debug_3('Running rediscover, resetting system node...')
# Reboot the node to begin rediscover.
resetresponse = fit_common.rackhdapi(
'/redfish/v1/Systems/' + node + '/Actions/ComputerSystem.Reset', action='post',
payload={"reset_type": "ForceRestart"})
self.assertTrue(resetresponse['status'] < 209,
'Incorrect HTTP return code, expected <209 , got:' + str(resetresponse['status']))
# Delete original node
for dummy in range(0, 30):
time.sleep(2)
result = fit_common.rackhdapi('/api/2.0/nodes/' + node, action='delete')
if result['status'] < 209:
break
self.assertTrue(result['status'] < 209, 'Was expecting response code < 209. Got ' + str(result['status']))
logs.debug_3("Waiting node reboot and boot into microkernel........")
        self.assertTrue(self._wait_for_discover(node_uuid), "Failed to find the original node after reboot!")
        logs.debug_3("Found the original node. It was rediscovered successfully!")
self._delete_all_microkernels()
if __name__ == '__main__':
unittest.main()
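# ---------------------------------------------------------------------------
# Quick reference (sketch) for the image-service control API exercised above;
# host and port come from fitcfg()["image_service"]. Only the endpoints used
# by the helpers in this file are listed:
#
#   PUT    /images?name=<os>&version=<ver>&isoweb=<url>   upload an OS repo
#   DELETE /images?name=<os>&version=<ver>                 delete an OS repo
#   DELETE /iso?name=<iso>                                  delete an uploaded ISO
#   PUT    /microkernel?name=<file>   (binary body)          upload a microkernel
#   GET    /microkernel                                      list microkernels
#   DELETE /microkernel?name=<file>                          delete a microkernel
# ---------------------------------------------------------------------------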
|
|
"""Tornado handlers for nbgrader course list web service."""
import os
import contextlib
import json
import traceback
from tornado import web
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado import gen
from textwrap import dedent
from urllib.parse import urlparse
from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import IPythonHandler
from jupyter_core.paths import jupyter_config_path
from ...apps import NbGrader
from ...auth import Authenticator
from ...auth.jupyterhub import (JupyterhubEnvironmentError, get_jupyterhub_api_url,
get_jupyterhub_authorization, get_jupyterhub_user)
from ...coursedir import CourseDirectory
from ... import __version__ as nbgrader_version
@contextlib.contextmanager
def chdir(dirname):
currdir = os.getcwd()
os.chdir(dirname)
yield
os.chdir(currdir)
class CourseListHandler(IPythonHandler):
@property
def assignment_dir(self):
return self.settings['assignment_dir']
def get_base_url(self):
parts = urlparse(self.request.full_url())
base_url = parts.scheme + "://" + parts.netloc
return base_url.rstrip("/")
def load_config(self):
paths = jupyter_config_path()
paths.insert(0, os.getcwd())
app = NbGrader()
app.config_file_paths.append(paths)
app.load_config_file()
return app.config
@gen.coroutine
def check_for_local_formgrader(self, config):
base_url = self.get_base_url() + "/" + self.base_url.lstrip("/")
base_url = base_url.rstrip("/")
url = base_url + "/formgrader/api/status"
header = {"Authorization": "token {}".format(self.token)}
http_client = AsyncHTTPClient()
try:
response = yield http_client.fetch(url, headers=header)
except (HTTPError, ConnectionRefusedError):
# local formgrader isn't running
self.log.warning("Local formgrader does not seem to be running")
raise gen.Return([])
try:
response = json.loads(response.body.decode())
status = response['status']
except:
self.log.error("Couldn't decode response from local formgrader")
self.log.error(traceback.format_exc())
raise gen.Return([])
coursedir = CourseDirectory(config=config)
if status:
raise gen.Return([{
'course_id': coursedir.course_id,
'url': base_url + '/formgrader',
'kind': 'local'
}])
self.log.error("Local formgrader not accessible")
raise gen.Return([])
@gen.coroutine
def check_for_noauth_jupyterhub_formgraders(self, config):
try:
get_jupyterhub_user()
except JupyterhubEnvironmentError:
# Not running on JupyterHub.
raise gen.Return([])
# We are running on JupyterHub, so maybe there's a formgrader
# service. Check if we have a course id and if so guess the path to the
# formgrader.
coursedir = CourseDirectory(config=config)
if not coursedir.course_id:
raise gen.Return([])
url = self.get_base_url() + "/services/" + coursedir.course_id + "/formgrader"
auth = get_jupyterhub_authorization()
http_client = AsyncHTTPClient()
try:
yield http_client.fetch(url, headers=auth)
except:
self.log.error("Formgrader not available at URL: %s", url)
raise gen.Return([])
courses = [{
'course_id': coursedir.course_id,
'url': url,
'kind': 'jupyterhub'
}]
raise gen.Return(courses)
@gen.coroutine
def check_for_jupyterhub_formgraders(self, config):
# first get the list of courses from the authenticator
auth = Authenticator(config=config)
try:
course_names = auth.get_student_courses("*")
except JupyterhubEnvironmentError:
# not running on JupyterHub, or otherwise don't have access
raise gen.Return([])
# If course_names is None, that means either we're not running with
# JupyterHub, or we just have a single class for all students and
# instructors.
if course_names is None:
courses = yield self.check_for_noauth_jupyterhub_formgraders(config)
raise gen.Return(courses)
base_url = get_jupyterhub_api_url()
url = base_url + "/services"
auth = get_jupyterhub_authorization()
http_client = AsyncHTTPClient()
response = yield http_client.fetch(url, headers=auth)
try:
services = json.loads(response.body.decode())
except:
self.log.error("Failed to decode response: %s", response.body)
raise gen.Return([])
courses = []
for course in course_names:
if course not in services:
self.log.warning("Couldn't find formgrader for course '%s'", course)
continue
service = services[course]
courses.append({
'course_id': course,
'url': self.get_base_url() + service['prefix'].rstrip('/') + "/formgrader",
'kind': 'jupyterhub'
})
raise gen.Return(courses)
@gen.coroutine
@web.authenticated
def get(self):
with chdir(self.assignment_dir):
try:
config = self.load_config()
courses = []
local_courses = yield self.check_for_local_formgrader(config)
jhub_courses = yield self.check_for_jupyterhub_formgraders(config)
courses.extend(local_courses)
courses.extend(jhub_courses)
except:
self.log.error(traceback.format_exc())
retvalue = {
"success": False,
"value": traceback.format_exc()
}
else:
retvalue = {
"success": True,
"value": sorted(courses, key=lambda x: x['course_id'])
}
raise gen.Return(self.finish(json.dumps(retvalue)))
class NbGraderVersionHandler(IPythonHandler):
@web.authenticated
def get(self):
ui_version = self.get_argument('version')
if ui_version != nbgrader_version:
msg = dedent(
"""
The version of the Course List nbextension does not match
the server extension; the nbextension version is {} while the
server version is {}. This can happen if you have recently
upgraded nbgrader, and may cause this extension to not work
correctly. To fix the problem, please see the nbgrader
installation instructions:
http://nbgrader.readthedocs.io/en/stable/user_guide/installation.html
""".format(ui_version, nbgrader_version)
).strip().replace("\n", " ")
self.log.error(msg)
result = {"success": False, "message": msg}
else:
result = {"success": True}
self.finish(json.dumps(result))
#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------
default_handlers = [
(r"/formgraders", CourseListHandler),
(r"/nbgrader_version", NbGraderVersionHandler)
]
def load_jupyter_server_extension(nbapp):
"""Load the nbserver"""
nbapp.log.info("Loading the course_list nbgrader serverextension")
webapp = nbapp.web_app
base_url = webapp.settings['base_url']
webapp.settings['assignment_dir'] = nbapp.notebook_dir
webapp.add_handlers(".*$", [
(ujoin(base_url, pat), handler)
for pat, handler in default_handlers
])
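# ---------------------------------------------------------------------------
# Sketch of the JSON payload returned by CourseListHandler (GET /formgraders);
# the values below are illustrative only:
#
#     {"success": true,
#      "value": [{"course_id": "course101",
#                 "url": "https://hub.example.com/services/course101/formgrader",
#                 "kind": "jupyterhub"}]}
#
# On failure, "success" is false and "value" carries the formatted traceback.
# ---------------------------------------------------------------------------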
|
|
from distutils.version import LooseVersion
from dipy.utils.optpkg import optional_package
tf, have_tf, _ = optional_package('tensorflow')
if have_tf:
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
raise ImportError('Please upgrade to TensorFlow 2+')
class SingleLayerPerceptron(object):
def __init__(self, input_shape=(28, 28),
num_hidden=128, act_hidden='relu',
dropout=0.2,
num_out=10, act_out='softmax',
optimizer='adam',
loss='sparse_categorical_crossentropy'):
"""Single Layer Perceptron with Dropout.
Parameters
----------
input_shape : tuple
Shape of data to be trained
num_hidden : int
Number of nodes in hidden layer
act_hidden : string
Activation function used in hidden layer
dropout : float
Dropout ratio
        num_out : int
Number of nodes in output layer
act_out : string
Activation function used in output layer
optimizer : string
Select optimizer. Default adam.
loss : string
Select loss function for measuring accuracy.
Default sparse_categorical_crossentropy.
"""
self.accuracy = None
self.loss = None
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=input_shape),
tf.keras.layers.Dense(num_hidden, activation=act_hidden),
tf.keras.layers.Dropout(dropout),
tf.keras.layers.Dense(num_out, activation=act_out)
])
model.compile(optimizer=optimizer,
loss=loss,
metrics=['accuracy'])
self.model = model
def summary(self):
"""Get the summary of the model.
The summary is textual and includes information about:
The layers and their order in the model.
The output shape of each layer.
Returns
-------
summary : NoneType
the summary of the model
"""
return self.model.summary()
def fit(self, x_train, y_train, epochs=5):
"""Train the model on train dataset.
The fit method will train the model for a fixed
number of epochs (iterations) on a dataset.
Parameters
----------
x_train : ndarray
the x_train is the train dataset
y_train : ndarray shape=(BatchSize,)
the y_train is the labels of the train dataset
epochs : int (Default = 5)
the number of epochs
Returns
-------
hist : object
A History object. Its History.history attribute is a record of
training loss values and metrics values at successive epochs
"""
hist = self.model.fit(x_train, y_train, epochs=epochs)
self.accuracy = hist.history['accuracy'][0]
self.loss = hist.history['loss'][0]
return hist
def evaluate(self, x_test, y_test, verbose=2):
"""Evaluate the model on test dataset.
The evaluate method will evaluate the model on a test
dataset.
Parameters
----------
x_test : ndarray
the x_test is the test dataset
y_test : ndarray shape=(BatchSize,)
the y_test is the labels of the test dataset
verbose : int (Default = 2)
            Set verbose to 0, 1, or 2 to control how much progress output
            is shown during evaluation.
Returns
-------
evaluate : List
return list of loss value and accuracy value on test dataset
"""
return self.model.evaluate(x_test, y_test, verbose=verbose)
def predict(self, x_test):
"""Predict the output from input samples.
        The predict method generates output predictions
for the input samples.
Parameters
----------
        x_test : ndarray
            the test dataset or input samples
Returns
-------
predict : ndarray shape(TestSize,OutputSize)
Numpy array(s) of predictions.
"""
return self.model.predict(x_test)
class MultipleLayerPercepton(object):
def __init__(self, input_shape=(28, 28),
num_hidden=[128],
act_hidden='relu',
dropout=0.2,
num_out=10,
act_out='softmax',
loss='sparse_categorical_crossentropy',
optimizer='adam'):
"""Multiple Layer Perceptron with Dropout.
Parameters
----------
input_shape : tuple
Shape of data to be trained
num_hidden : list
List of number of nodes in hidden layers
act_hidden : string
Activation function used in hidden layer
dropout : float
Dropout ratio
        num_out : int
Number of nodes in output layer
act_out : string
Activation function used in output layer
optimizer : string
Select optimizer. Default adam.
loss : string
Select loss function for measuring accuracy.
Default sparse_categorical_crossentropy.
"""
self.input_shape = input_shape
self.num_hidden = num_hidden
self.act_hidden = act_hidden
self.dropout = dropout
self.num_out = num_out
self.act_out = act_out
self.loss = loss
self.optimizer = optimizer
self.accuracy = None
# model building
inp = tf.keras.layers.Input(shape=self.input_shape)
x = tf.keras.layers.Flatten()(inp)
for i in range(len(self.num_hidden)):
            x = tf.keras.layers.Dense(self.num_hidden[i], activation=self.act_hidden)(x)
x = tf.keras.layers.Dropout(self.dropout)(x)
out = tf.keras.layers.Dense(self.num_out, activation=self.act_out)(x)
self.model = tf.keras.models.Model(inputs=inp, outputs=out)
# compiling the model
self.model.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=['accuracy'])
def summary(self):
"""Get the summary of the model.
The summary is textual and includes information about:
The layers and their order in the model.
The output shape of each layer.
Returns
-------
summary : NoneType
the summary of the model
"""
return self.model.summary()
def fit(self, x_train, y_train, epochs=5):
"""Train the model on train dataset.
The fit method will train the model for a fixed
number of epochs (iterations) on a dataset.
Parameters
----------
x_train : ndarray
the x_train is the train dataset
y_train : ndarray shape=(BatchSize,)
the y_train is the labels of the train dataset
epochs : int (Default = 5)
the number of epochs
Returns
-------
hist : object
A History object. Its History.history attribute is a record of
training loss values and metrics values at successive epochs
"""
hist = self.model.fit(x_train, y_train, epochs=epochs)
self.accuracy = hist.history['accuracy'][0]
self.loss = hist.history['loss'][0]
return hist
def evaluate(self, x_test, y_test, verbose=2):
"""Evaluate the model on test dataset.
The evaluate method will evaluate the model on a test
dataset.
Parameters
----------
x_test : ndarray
the x_test is the test dataset
y_test : ndarray shape=(BatchSize,)
the y_test is the labels of the test dataset
verbose : int (Default = 2)
            Set verbose to 0, 1, or 2 to control how much progress output
            is shown during evaluation.
Returns
-------
evaluate : List
return list of loss value and accuracy value on test dataset
"""
return self.model.evaluate(x_test, y_test, verbose=verbose)
def predict(self, x_test):
"""Predict the output from input samples.
        The predict method generates output predictions
for the input samples.
Parameters
----------
        x_test : ndarray
            the test dataset or input samples
Returns
-------
predict : ndarray shape(TestSize,OutputSize)
Numpy array(s) of predictions.
"""
return self.model.predict(x_test)
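# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming TensorFlow 2+ is installed. The random data,
# shapes and epoch count below are illustrative only (MNIST-like 28x28 inputs
# with 10 classes).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    # 64 fake 28x28 "images" with integer class labels in [0, 10)
    x_train = np.random.rand(64, 28, 28).astype('float32')
    y_train = np.random.randint(0, 10, size=(64,))

    slp = SingleLayerPerceptron(input_shape=(28, 28), num_hidden=128)
    slp.fit(x_train, y_train, epochs=1)   # History with loss/accuracy per epoch
    preds = slp.predict(x_train[:5])      # shape (5, 10): per-class scores
    print(preds.shape)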
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
"""
Utilities for streaming from several file-like data storages: S3 / HDFS / standard
filesystem / compressed files..., using a single, Pythonic API.
The streaming makes heavy use of generators and pipes, to avoid loading
full file contents into memory, allowing work with arbitrarily large files.
The main methods are:
* `smart_open()`, which opens the given file for reading/writing
* `s3_iter_bucket()`, which goes over all keys in an S3 bucket in parallel
"""
import logging
import os
import subprocess
import sys
import requests
if sys.version_info[0] == 2:
import httplib
elif sys.version_info[0] == 3:
import http.client as httplib
from boto.compat import BytesIO, urlsplit, six
import boto.s3.key
logger = logging.getLogger(__name__)
# Multiprocessing is unavailable in App Engine (and possibly other sandboxes).
# The only method currently relying on it is s3_iter_bucket, which is instructed
# not to use it by the NO_MULTIPROCESSING flag.
try:
import multiprocessing.pool
except ImportError:
logger.warning("multiprocessing could not be imported and won't be used")
NO_MULTIPROCESSING = True
from itertools import imap
else:
NO_MULTIPROCESSING = False
S3_MIN_PART_SIZE = 50 * 1024**2 # minimum part size for S3 multipart uploads
WEBHDFS_MIN_PART_SIZE = 50 * 1024**2 # minimum part size for HDFS multipart uploads
def smart_open(uri, mode="rb", **kw):
"""
Open the given S3 / HDFS / filesystem file pointed to by `uri` for reading or writing.
The only supported modes for now are 'rb' (read, default) and 'wb' (replace & write).
The reads/writes are memory efficient (streamed) and therefore suitable for
arbitrarily large files.
The `uri` can be either:
1. a URI for the local filesystem (compressed ``.gz`` or ``.bz2`` files handled automatically):
`./lines.txt`, `/home/joe/lines.txt.gz`, `file:///home/joe/lines.txt.bz2`
2. a URI for HDFS: `hdfs:///some/path/lines.txt`
3. a URI for Amazon's S3 (can also supply credentials inside the URI):
`s3://my_bucket/lines.txt`, `s3://my_aws_key_id:key_secret@my_bucket/lines.txt`
4. an instance of the boto.s3.key.Key class.
Examples::
>>> # stream lines from S3; you can use context managers too:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt') as fin:
... for line in fin:
... print line
>>> # you can also use a boto.s3.key.Key instance directly:
>>> key = boto.connect_s3().get_bucket("my_bucket").get_key("my_key")
>>> with smart_open.smart_open(key) as fin:
... for line in fin:
... print line
>>> # stream line-by-line from an HDFS file
>>> for line in smart_open.smart_open('hdfs:///user/hadoop/my_file.txt'):
... print line
>>> # stream content *into* S3:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt', 'wb') as fout:
... for line in ['first line', 'second line', 'third line']:
... fout.write(line + '\n')
>>> # stream from/to (compressed) local files:
>>> for line in smart_open.smart_open('/home/radim/my_file.txt'):
... print line
>>> for line in smart_open.smart_open('/home/radim/my_file.txt.gz'):
... print line
>>> with smart_open.smart_open('/home/radim/my_file.txt.gz', 'wb') as fout:
... fout.write("hello world!\n")
>>> with smart_open.smart_open('/home/radim/another.txt.bz2', 'wb') as fout:
... fout.write("good bye!\n")
"""
# validate mode parameter
if not isinstance(mode, six.string_types):
raise TypeError('mode should be a string')
if isinstance(uri, six.string_types):
# this method just routes the request to classes handling the specific storage
# schemes, depending on the URI protocol in `uri`
parsed_uri = ParseUri(uri)
if parsed_uri.scheme in ("file", ):
# local files -- both read & write supported
# compression, if any, is determined by the filename extension (.gz, .bz2)
return file_smart_open(parsed_uri.uri_path, mode)
elif parsed_uri.scheme in ("s3", "s3n"):
# Get an S3 host. It is required for sigv4 operations.
host = kw.pop('host', None)
if not host:
host = boto.config.get('s3', 'host', 's3.amazonaws.com')
# For credential order of precedence see
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html#credentials
s3_connection = boto.connect_s3(
aws_access_key_id=parsed_uri.access_id,
host=host,
aws_secret_access_key=parsed_uri.access_secret,
profile_name=kw.pop('profile_name', None))
bucket = s3_connection.get_bucket(parsed_uri.bucket_id)
if mode in ('r', 'rb'):
key = bucket.get_key(parsed_uri.key_id)
if key is None:
raise KeyError(parsed_uri.key_id)
return S3OpenRead(key)
elif mode in ('w', 'wb'):
key = bucket.get_key(parsed_uri.key_id, validate=False)
if key is None:
raise KeyError(parsed_uri.key_id)
return S3OpenWrite(key, **kw)
else:
raise NotImplementedError("file mode %s not supported for %r scheme", mode, parsed_uri.scheme)
elif parsed_uri.scheme in ("hdfs", ):
if mode in ('r', 'rb'):
return HdfsOpenRead(parsed_uri, **kw)
else:
raise NotImplementedError("file mode %s not supported for %r scheme", mode, parsed_uri.scheme)
elif parsed_uri.scheme in ("webhdfs", ):
if mode in ('r', 'rb'):
return WebHdfsOpenRead(parsed_uri, **kw)
elif mode in ('w', 'wb'):
return WebHdfsOpenWrite(parsed_uri, **kw)
else:
raise NotImplementedError("file mode %s not supported for %r scheme", mode, parsed_uri.scheme)
else:
raise NotImplementedError("scheme %r is not supported", parsed_uri.scheme)
elif isinstance(uri, boto.s3.key.Key):
# handle case where we are given an S3 key directly
if mode in ('r', 'rb'):
return S3OpenRead(uri)
elif mode in ('w', 'wb'):
return S3OpenWrite(uri, **kw)
elif hasattr(uri, 'read'):
# simply pass-through if already a file-like
return uri
else:
raise TypeError('don\'t know how to handle uri %s' % repr(uri))
class ParseUri(object):
"""
Parse the given URI.
Supported URI schemes are "file", "s3", "s3n" and "hdfs".
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
"""
def __init__(self, uri, default_scheme="file"):
"""
Assume `default_scheme` if no scheme given in `uri`.
"""
if os.name == 'nt':
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if '://' not in uri:
# no protocol given => assume a local file
uri = 'file://' + uri
parsed_uri = urlsplit(uri)
self.scheme = parsed_uri.scheme if parsed_uri.scheme else default_scheme
if self.scheme == "hdfs":
self.uri_path = parsed_uri.netloc + parsed_uri.path
self.uri_path = "/" + self.uri_path.lstrip("/")
if not self.uri_path:
raise RuntimeError("invalid HDFS URI: %s" % uri)
elif self.scheme == "webhdfs":
self.uri_path = parsed_uri.netloc + "/webhdfs/v1" + parsed_uri.path
if not self.uri_path:
raise RuntimeError("invalid WebHDFS URI: %s" % uri)
elif self.scheme in ("s3", "s3n"):
self.bucket_id = (parsed_uri.netloc + parsed_uri.path).split('@')
self.key_id = None
if len(self.bucket_id) == 1:
# URI without credentials: s3://bucket/object
self.bucket_id, self.key_id = self.bucket_id[0].split('/', 1)
# "None" credentials are interpreted as "look for credentials in other locations" by boto
self.access_id, self.access_secret = None, None
elif len(self.bucket_id) == 2 and len(self.bucket_id[0].split(':')) == 2:
# URI in full format: s3://key:secret@bucket/object
# access key id: [A-Z0-9]{20}
# secret access key: [A-Za-z0-9/+=]{40}
acc, self.bucket_id = self.bucket_id
self.access_id, self.access_secret = acc.split(':')
self.bucket_id, self.key_id = self.bucket_id.split('/', 1)
else:
# more than 1 '@' means invalid uri
# Bucket names must be at least 3 and no more than 63 characters long.
# Bucket names must be a series of one or more labels.
# Adjacent labels are separated by a single period (.).
# Bucket names can contain lowercase letters, numbers, and hyphens.
# Each label must start and end with a lowercase letter or a number.
raise RuntimeError("invalid S3 URI: %s" % uri)
elif self.scheme == 'file':
self.uri_path = parsed_uri.netloc + parsed_uri.path
if not self.uri_path:
raise RuntimeError("invalid file URI: %s" % uri)
else:
raise NotImplementedError("unknown URI scheme %r in %r" % (self.scheme, uri))
class S3OpenRead(object):
"""
Implement streamed reader from S3, as an iterable & context manager.
"""
def __init__(self, read_key):
if not hasattr(read_key, "bucket") and not hasattr(read_key, "name") and not hasattr(read_key, "read") \
and not hasattr(read_key, "close"):
raise TypeError("can only process S3 keys")
self.read_key = read_key
self.line_generator = s3_iter_lines(self.read_key)
def __iter__(self):
key = self.read_key.bucket.get_key(self.read_key.name)
if key is None:
raise KeyError(self.read_key.name)
return s3_iter_lines(key)
def read(self, size=None):
"""
Read a specified number of bytes from the key.
Note read() and line iteration (`for line in self: ...`) each have their
own file position, so they are independent. Doing a `read` will not affect
the line iteration, and vice versa.
"""
if not size or size < 0:
# For compatibility with standard Python, `read(negative)` = read the rest of the file.
# Otherwise, boto would read *from the start* if given size=-1.
size = 0
return self.read_key.read(size)
def seek(self, offset, whence=0):
"""
Seek to the specified position.
Only seeking to the beginning (offset=0) supported for now.
"""
if whence != 0 or offset != 0:
raise NotImplementedError("seek other than offset=0 not implemented yet")
self.read_key.close(fast=True)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.read_key.close()
def __str__(self):
return "%s<key: %s>" % (
self.__class__.__name__, self.read_key
)
class HdfsOpenRead(object):
"""
Implement streamed reader from HDFS, as an iterable & context manager.
"""
def __init__(self, parsed_uri):
if parsed_uri.scheme not in ("hdfs"):
raise TypeError("can only process HDFS files")
self.parsed_uri = parsed_uri
def __iter__(self):
hdfs = subprocess.Popen(["hdfs", "dfs", "-cat", self.parsed_uri.uri_path], stdout=subprocess.PIPE)
return hdfs.stdout
def read(self, size=None):
raise NotImplementedError("read() not implemented yet")
def seek(self, offset, whence=None):
raise NotImplementedError("seek() not implemented yet")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class WebHdfsOpenRead(object):
"""
Implement streamed reader from WebHDFS, as an iterable & context manager.
NOTE: it does not support kerberos authentication yet
"""
def __init__(self, parsed_uri):
if parsed_uri.scheme not in ("webhdfs"):
raise TypeError("can only process WebHDFS files")
self.parsed_uri = parsed_uri
self.offset = 0
def __iter__(self):
payload = {"op": "OPEN"}
response = requests.get("http://" + self.parsed_uri.uri_path, params=payload, stream=True)
return response.iter_lines()
def read(self, size=None):
"""
        Read the specified number of bytes from the file.
Note read() and line iteration (`for line in self: ...`) each have their
own file position, so they are independent. Doing a `read` will not affect
the line iteration, and vice versa.
"""
if not size or size < 0:
payload = {"op": "OPEN", "offset": self.offset}
self.offset = 0
else:
payload = {"op": "OPEN", "offset": self.offset, "length": size}
self.offset = self.offset + size
response = requests.get("http://" + self.parsed_uri.uri_path, params=payload, stream=True)
return response.content
def seek(self, offset, whence=0):
"""
Seek to the specified position.
Only seeking to the beginning (offset=0) supported for now.
"""
if whence == 0 and offset == 0:
self.offset = 0
elif whence == 0:
self.offset = offset
else:
raise NotImplementedError("operations with whence not implemented yet")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def file_smart_open(fname, mode='rb'):
"""
Stream from/to local filesystem, transparently (de)compressing gzip and bz2
files if necessary.
"""
_, ext = os.path.splitext(fname)
if ext == '.bz2':
PY2 = sys.version_info[0] == 2
if PY2:
from bz2file import BZ2File
else:
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
class S3OpenWrite(object):
"""
Context manager for writing into S3 files.
"""
def __init__(self, outkey, min_part_size=S3_MIN_PART_SIZE, **kw):
"""
Streamed input is uploaded in chunks, as soon as `min_part_size` bytes are
accumulated (50MB by default). The minimum chunk size allowed by AWS S3
is 5MB.
"""
if not hasattr(outkey, "bucket") and not hasattr(outkey, "name"):
raise TypeError("can only process S3 keys")
self.outkey = outkey
self.min_part_size = min_part_size
if min_part_size < 5 * 1024 ** 2:
logger.warning("S3 requires minimum part size >= 5MB; multipart upload may fail")
        # initialize multipart upload
self.mp = self.outkey.bucket.initiate_multipart_upload(self.outkey, **kw)
# initialize stats
self.lines = []
self.total_size = 0
self.chunk_bytes = 0
self.parts = 0
def __str__(self):
return "%s<key: %s, min_part_size: %s>" % (
self.__class__.__name__, self.outkey, self.min_part_size,
)
def write(self, b):
"""
        Write the given bytes (binary string) into the S3 file opened in the constructor.
Note there's buffering happening under the covers, so this may not actually
do any HTTP transfer right away.
"""
if isinstance(b, six.text_type):
# not part of API: also accept unicode => encode it as utf8
b = b.encode('utf8')
if not isinstance(b, six.binary_type):
raise TypeError("input must be a binary string")
self.lines.append(b)
self.chunk_bytes += len(b)
self.total_size += len(b)
if self.chunk_bytes >= self.min_part_size:
buff = b"".join(self.lines)
logger.info("uploading part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
self.mp.upload_part_from_file(BytesIO(buff), part_num=self.parts + 1)
logger.debug("upload of part #%i finished" % self.parts)
self.parts += 1
self.lines, self.chunk_bytes = [], 0
def seek(self, offset, whence=None):
raise NotImplementedError("seek() not implemented yet")
def close(self):
buff = b"".join(self.lines)
if buff:
logger.info("uploading last part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
self.mp.upload_part_from_file(BytesIO(buff), part_num=self.parts + 1)
logger.debug("upload of last part #%i finished" % self.parts)
if self.total_size:
self.mp.complete_upload()
else:
# AWS complains with "The XML you provided was not well-formed or did not validate against our published schema"
# when the input is completely empty => abort the upload, no file created
logger.info("empty input, ignoring multipart upload")
self.outkey.bucket.cancel_multipart_upload(self.mp.key_name, self.mp.id)
# So, instead, create an empty file like this
logger.info("setting an empty value for the key")
self.outkey.set_contents_from_string('')
def __enter__(self):
return self
def _termination_error(self):
logger.exception("encountered error while terminating multipart upload; attempting cancel")
self.outkey.bucket.cancel_multipart_upload(self.mp.key_name, self.mp.id)
logger.info("cancel completed")
def __exit__(self, type, value, traceback):
if type is not None:
self._termination_error()
return False
try:
self.close()
except:
self._termination_error()
raise
class WebHdfsOpenWrite(object):
"""
Context manager for writing into webhdfs files
"""
def __init__(self, parsed_uri, min_part_size=WEBHDFS_MIN_PART_SIZE):
if parsed_uri.scheme not in ("webhdfs"):
raise TypeError("can only process WebHDFS files")
self.parsed_uri = parsed_uri
self.closed = False
self.min_part_size = min_part_size
# creating empty file first
payload = {"op": "CREATE", "overwrite": True}
init_response = requests.put("http://" + self.parsed_uri.uri_path, params=payload, allow_redirects=False)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
uri = init_response.headers['location']
response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
if not response.status_code == httplib.CREATED:
raise WebHdfsException(str(response.status_code) + "\n" + response.content)
self.lines = []
self.parts = 0
self.chunk_bytes = 0
self.total_size = 0
def upload(self, data):
payload = {"op": "APPEND"}
init_response = requests.post("http://" + self.parsed_uri.uri_path, params=payload, allow_redirects=False)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
uri = init_response.headers['location']
response = requests.post(uri, data=data, headers={'content-type': 'application/octet-stream'})
if not response.status_code == httplib.OK:
raise WebHdfsException(str(response.status_code) + "\n" + response.content)
def write(self, b):
"""
        Write the given bytes (binary string) into the WebHDFS file opened in the constructor.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if isinstance(b, six.text_type):
# not part of API: also accept unicode => encode it as utf8
b = b.encode('utf8')
if not isinstance(b, six.binary_type):
raise TypeError("input must be a binary string")
self.lines.append(b)
self.chunk_bytes += len(b)
self.total_size += len(b)
if self.chunk_bytes >= self.min_part_size:
buff = b"".join(self.lines)
logger.info("uploading part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
self.upload(buff)
logger.debug("upload of part #%i finished" % self.parts)
self.parts += 1
self.lines, self.chunk_bytes = [], 0
def seek(self, offset, whence=None):
raise NotImplementedError("seek() not implemented yet")
def close(self):
buff = b"".join(self.lines)
if buff:
logger.info("uploading last part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
self.upload(buff)
logger.debug("upload of last part #%i finished" % self.parts)
self.closed = True
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def s3_iter_bucket_process_key(key):
"""
    Conceptually part of `s3_iter_bucket`, but must remain a top-level
    function so that multiprocessing can pickle it.
"""
return key, key.get_contents_as_string()
def s3_iter_bucket(bucket, prefix='', accept_key=lambda key: True, key_limit=None, workers=16):
"""
Iterate and download all S3 files under `bucket/prefix`, yielding out
`(key, key content)` 2-tuples (generator).
`accept_key` is a function that accepts a key name (unicode string) and
    returns True/False, signalling whether the given key should be downloaded or
not (default: accept all keys).
If `key_limit` is given, stop after yielding out that many results.
The keys are processed in parallel, using `workers` processes (default: 16),
    to speed up downloads greatly. If multiprocessing is unavailable
    (NO_MULTIPROCESSING is True), this parameter is ignored.
Example::
>>> mybucket = boto.connect_s3().get_bucket('mybucket')
>>> # get all JSON files under "mybucket/foo/"
>>> for key, content in s3_iter_bucket(mybucket, prefix='foo/', accept_key=lambda key: key.endswith('.json')):
... print key, len(content)
>>> # limit to 10k files, using 32 parallel workers (default is 16)
>>> for key, content in s3_iter_bucket(mybucket, key_limit=10000, workers=32):
... print key, len(content)
"""
total_size, key_no = 0, -1
keys = (key for key in bucket.list(prefix=prefix) if accept_key(key.name))
if NO_MULTIPROCESSING:
logger.info("iterating over keys from %s without multiprocessing" % bucket)
iterator = imap(s3_iter_bucket_process_key, keys)
else:
logger.info("iterating over keys from %s with %i workers" % (bucket, workers))
pool = multiprocessing.pool.Pool(processes=workers)
iterator = pool.imap_unordered(s3_iter_bucket_process_key, keys)
for key_no, (key, content) in enumerate(iterator):
if key_no % 1000 == 0:
logger.info("yielding key #%i: %s, size %i (total %.1fMB)" %
(key_no, key, len(content), total_size / 1024.0 ** 2))
yield key, content
key.close()
total_size += len(content)
if key_limit is not None and key_no + 1 >= key_limit:
# we were asked to output only a limited number of keys => we're done
break
if not NO_MULTIPROCESSING:
pool.terminate()
logger.info("processed %i keys, total size %i" % (key_no + 1, total_size))
def s3_iter_lines(key):
"""
Stream an object from S3 line by line (generator).
`key` must be a `boto.key.Key` object.
"""
# check valid object on input
if not isinstance(key, boto.s3.key.Key):
raise TypeError("expected boto.key.Key object on input")
buf = b''
# keep reading chunks of bytes into the buffer
for chunk in key:
buf += chunk
start = 0
# process all lines within the current buffer
while True:
end = buf.find(b'\n', start) + 1
if end:
yield buf[start : end]
start = end
else:
# no more newlines => break out to read more data from s3 into the buffer
buf = buf[start : ]
break
# process the last line, too
if buf:
yield buf
class WebHdfsException(Exception):
def __init__(self, msg=str()):
self.msg = msg
super(WebHdfsException, self).__init__(self.msg)
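# ---------------------------------------------------------------------------
# Minimal sketch of how ParseUri splits the supported URI schemes; the URIs
# below are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    local_uri = ParseUri('./lines.txt.gz')
    print("%s %s" % (local_uri.scheme, local_uri.uri_path))      # file ./lines.txt.gz

    s3_uri = ParseUri('s3://my_bucket/some/key.txt')
    print("%s %s %s" % (s3_uri.scheme, s3_uri.bucket_id, s3_uri.key_id))  # s3 my_bucket some/key.txt

    hdfs_uri = ParseUri('hdfs:///user/hadoop/my_file.txt')
    print("%s %s" % (hdfs_uri.scheme, hdfs_uri.uri_path))        # hdfs /user/hadoop/my_file.txt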
|
|
import unittest
import mock
from requests.exceptions import ConnectionError
from tsuru_unit_agent.main import parse_args, main, TEMP_ENV_FILE
class TestMain(unittest.TestCase):
def test_parse_args(self):
args = parse_args(['a', 'b', 'c', 'd', 'run'])
self.assertEqual(args.action, 'run')
self.assertEqual(args.url, 'a')
self.assertEqual(args.token, 'b')
self.assertEqual(args.app_name, 'c')
self.assertEqual(args.start_cmd, 'd')
def test_parse_args_default_action(self):
args = parse_args(['a', 'b', 'c', 'd'])
self.assertEqual(args.action, 'run')
self.assertEqual(args.url, 'a')
self.assertEqual(args.token, 'b')
self.assertEqual(args.app_name, 'c')
self.assertEqual(args.start_cmd, 'd')
def test_parse_args_deploy(self):
args = parse_args(['a', 'b', 'c', 'd', 'deploy'])
self.assertEqual(args.action, 'deploy')
self.assertEqual(args.url, 'a')
self.assertEqual(args.token, 'b')
self.assertEqual(args.app_name, 'c')
self.assertEqual(args.start_cmd, 'd')
def test_parse_args_invalid(self):
self.assertRaises(SystemExit, parse_args, [])
self.assertRaises(SystemExit, parse_args, ['a', 'b', 'c', 'd', 'e'])
@mock.patch('sys.argv', ['', 'http://localhost', 'token', 'app1', 'mycmd', 'deploy'])
@mock.patch('tsuru_unit_agent.main.tasks')
@mock.patch('tsuru_unit_agent.main.Client')
def test_main_deploy_action(self, client_mock, tasks_mock):
register_mock = client_mock.return_value.register_unit
register_mock.return_value = ({'env1': 'val1'}, "0.16.0")
exec_script_mock = tasks_mock.execute_start_script
load_yaml_mock = tasks_mock.load_app_yaml
load_yaml_mock.return_value = {'hooks': {'build': ['cmd_1', 'cmd_2']}}
post_app_yaml_mock = client_mock.return_value.post_app_yaml
load_procfile_mock = tasks_mock.load_procfile
load_procfile_mock.return_value = 'web: python myproject.py\nworker: ./startworker'
parse_procfile_mock = tasks_mock.parse_procfile
parse_procfile_mock.return_value = {'web': 'python myproject.py',
'worker': './startworker'}
run_build_hooks_mock = tasks_mock.run_build_hooks
write_circus_conf_mock = tasks_mock.write_circus_conf
save_apprc_mock = tasks_mock.save_apprc_file
main()
call_count = len(client_mock.mock_calls) + len(tasks_mock.mock_calls)
self.assertEqual(call_count, 11)
client_mock.assert_called_once_with('http://localhost', 'token')
register_mock.assert_any_call('app1')
v = load_yaml_mock.return_value
v['procfile'] = load_procfile_mock.return_value
v['processes'] = parse_procfile_mock.return_value
register_mock.assert_any_call('app1', v)
save_apprc_mock.assert_called_once_with(register_mock.return_value[0])
exec_script_mock.assert_called_once_with('mycmd')
load_yaml_mock.assert_called_once_with()
write_circus_conf_mock.assert_called_once_with(envs={'env1': 'val1'})
post_app_yaml_mock.assert_called_once_with('app1', load_yaml_mock.return_value)
run_build_hooks_mock.assert_called_once_with(load_yaml_mock.return_value,
envs={'env1': 'val1'})
@mock.patch('sys.argv', ['', 'http://localhost', 'token', 'app1', 'mycmd', 'deploy'])
@mock.patch('tsuru_unit_agent.main.tasks')
@mock.patch('tsuru_unit_agent.main.Client')
@mock.patch('os.unlink')
def test_main_deploy_action_no_apprc(self, unlink_mock, client_mock, tasks_mock):
register_mock = client_mock.return_value.register_unit
register_mock.return_value = ({'env1': 'val1', 'port': '8888', 'PORT': '8888'}, "0.17.0")
exec_script_mock = tasks_mock.execute_start_script
load_yaml_mock = tasks_mock.load_app_yaml
load_yaml_mock.return_value = {'hooks': {'build': ['cmd_1', 'cmd_2']}}
post_app_yaml_mock = client_mock.return_value.post_app_yaml
load_procfile_mock = tasks_mock.load_procfile
load_procfile_mock.return_value = 'web: python myproject.py\nworker: ./startworker'
parse_procfile_mock = tasks_mock.parse_procfile
parse_procfile_mock.return_value = {'web': 'python myproject.py',
'worker': './startworker'}
run_build_hooks_mock = tasks_mock.run_build_hooks
write_circus_conf_mock = tasks_mock.write_circus_conf
save_apprc_mock = tasks_mock.save_apprc_file
main()
call_count = len(client_mock.mock_calls) + len(tasks_mock.mock_calls)
self.assertEqual(call_count, 12)
client_mock.assert_called_once_with('http://localhost', 'token')
register_mock.assert_any_call('app1')
v = load_yaml_mock.return_value
v['procfile'] = load_procfile_mock.return_value
v['processes'] = parse_procfile_mock.return_value
register_mock.assert_any_call('app1', v)
expected_calls = [mock.call({'port': '8888', 'PORT': '8888'}),
mock.call({'env1': 'val1', 'port': '8888', 'PORT': '8888'},
file_path=TEMP_ENV_FILE)]
self.assertEqual(expected_calls, save_apprc_mock.mock_calls)
unlink_mock.assert_called_once_with(TEMP_ENV_FILE)
exec_script_mock.assert_called_once_with('mycmd')
load_yaml_mock.assert_called_once_with()
write_circus_conf_mock.assert_called_once_with(envs=register_mock.return_value[0])
post_app_yaml_mock.assert_called_once_with('app1', load_yaml_mock.return_value)
run_build_hooks_mock.assert_called_once_with(load_yaml_mock.return_value,
envs=register_mock.return_value[0])
@mock.patch('sys.argv', ['', 'http://localhost', 'token', 'app1', 'mycmd', 'run'])
@mock.patch('tsuru_unit_agent.main.tasks')
@mock.patch('tsuru_unit_agent.main.Client')
def test_main_run_action(self, client_mock, tasks_mock):
register_mock = client_mock.return_value.register_unit
register_mock.return_value = ({'env1': 'val1'}, "0.16.0")
save_apprc_mock = tasks_mock.save_apprc_file
exec_script_mock = tasks_mock.execute_start_script
run_restart_hooks_mock = tasks_mock.run_restart_hooks
write_circus_conf_mock = tasks_mock.write_circus_conf
load_yaml_mock = tasks_mock.load_app_yaml
load_yaml_mock.return_value = {'hooks': {'build': ['cmd_1', 'cmd_2']}}
main()
call_count = len(client_mock.mock_calls) + len(tasks_mock.mock_calls)
self.assertEqual(call_count, 8)
write_circus_conf_mock.assert_called_once_with(envs={'env1': 'val1'})
client_mock.assert_called_once_with('http://localhost', 'token')
register_mock.assert_called_once_with('app1')
save_apprc_mock.assert_called_once_with(register_mock.return_value[0])
exec_script_mock.assert_called_once_with('mycmd', envs={'env1': 'val1'}, with_shell=False)
load_yaml_mock.assert_called_once_with()
run_restart_hooks_mock.assert_any_call('before', load_yaml_mock.return_value,
envs={'env1': 'val1'})
run_restart_hooks_mock.assert_any_call('after', load_yaml_mock.return_value,
envs={'env1': 'val1'})
@mock.patch('sys.argv', ['', 'http://localhost', 'token', 'app1', 'mycmd', 'run'])
@mock.patch('tsuru_unit_agent.main.tasks')
@mock.patch('tsuru_unit_agent.main.Client')
@mock.patch('os.unlink')
def test_main_run_action_no_apprc(self, unlink_mock, client_mock, tasks_mock):
register_mock = client_mock.return_value.register_unit
register_mock.return_value = ({'env1': 'val1', 'port': '8888', 'PORT': '8888'}, "0.17.1")
save_apprc_mock = tasks_mock.save_apprc_file
exec_script_mock = tasks_mock.execute_start_script
run_restart_hooks_mock = tasks_mock.run_restart_hooks
write_circus_conf_mock = tasks_mock.write_circus_conf
load_yaml_mock = tasks_mock.load_app_yaml
load_yaml_mock.return_value = {'hooks': {'build': ['cmd_1', 'cmd_2']}}
main()
call_count = len(client_mock.mock_calls) + len(tasks_mock.mock_calls)
self.assertEqual(call_count, 9)
write_circus_conf_mock.assert_called_once_with(envs=register_mock.return_value[0])
client_mock.assert_called_once_with('http://localhost', 'token')
register_mock.assert_called_once_with('app1')
expected_calls = [mock.call({'port': '8888', 'PORT': '8888'}),
mock.call({'env1': 'val1', 'port': '8888', 'PORT': '8888'},
file_path=TEMP_ENV_FILE)]
self.assertEqual(expected_calls, save_apprc_mock.mock_calls)
unlink_mock.assert_called_once_with(TEMP_ENV_FILE)
exec_script_mock.assert_called_once_with('mycmd', envs=register_mock.return_value[0],
with_shell=False)
load_yaml_mock.assert_called_once_with()
run_restart_hooks_mock.assert_any_call('before', load_yaml_mock.return_value,
envs=register_mock.return_value[0])
run_restart_hooks_mock.assert_any_call('after', load_yaml_mock.return_value,
envs=register_mock.return_value[0])
@mock.patch('sys.argv', ['', 'http://localhost', 'token', 'app1', 'mycmd', 'run'])
@mock.patch('tsuru_unit_agent.main.tasks')
@mock.patch('tsuru_unit_agent.main.Client')
def test_main_run_action_api_error(self, client_mock, tasks_mock):
register_mock = client_mock.return_value.register_unit
def fail(*args):
raise ConnectionError()
register_mock.side_effect = fail
save_apprc_mock = tasks_mock.save_apprc_file
parse_apprc_mock = tasks_mock.parse_apprc_file
parse_apprc_mock.return_value = {'env1': 'val1'}
exec_script_mock = tasks_mock.execute_start_script
run_restart_hooks_mock = tasks_mock.run_restart_hooks
write_circus_conf_mock = tasks_mock.write_circus_conf
load_yaml_mock = tasks_mock.load_app_yaml
load_yaml_mock.return_value = {'hooks': {'build': ['cmd_1', 'cmd_2']}}
main()
call_count = len(client_mock.mock_calls) + len(tasks_mock.mock_calls)
self.assertEqual(call_count, 8)
write_circus_conf_mock.assert_called_once_with(envs={'env1': 'val1'})
client_mock.assert_called_once_with('http://localhost', 'token')
register_mock.assert_called_once_with('app1')
parse_apprc_mock.assert_called_once_with()
self.assertEqual(save_apprc_mock.call_count, 0)
exec_script_mock.assert_called_once_with('mycmd', envs={'env1': 'val1'}, with_shell=False)
load_yaml_mock.assert_called_once_with()
run_restart_hooks_mock.assert_any_call('before', load_yaml_mock.return_value,
envs={'env1': 'val1'})
run_restart_hooks_mock.assert_any_call('after', load_yaml_mock.return_value,
envs={'env1': 'val1'})
|
|
# -*- coding: utf-8 -*-
'''
Salt module to manage Unix mounts and the fstab file
'''
# Import python libs
from __future__ import absolute_import
import os
import re
import logging
# Import salt libs
import salt.utils
from salt.utils import which as _which
from salt.exceptions import CommandNotFoundError, CommandExecutionError
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import filter, zip # pylint: disable=import-error,redefined-builtin
# Set up logger
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'mount'
def __virtual__():
'''
Only load on POSIX-like systems
'''
# Disable on Windows, a specific file module exists:
if salt.utils.is_windows():
return False
return True
def _list_mounts():
ret = {}
if __grains__['os'] in ['MacOS', 'Darwin']:
mounts = __salt__['cmd.run_stdout']('mount')
else:
mounts = __salt__['cmd.run_stdout']('mount -l')
for line in mounts.split('\n'):
comps = re.sub(r"\s+", " ", line).split()
if len(comps) >= 3:
ret[comps[2]] = comps[0]
return ret
def _active_mountinfo(ret):
_list = _list_mounts()
filename = '/proc/self/mountinfo'
if not os.access(filename, os.R_OK):
msg = 'File not readable {0}'
raise CommandExecutionError(msg.format(filename))
blkid_info = __salt__['disk.blkid']()
with salt.utils.fopen(filename) as ifile:
for line in ifile:
comps = line.split()
device = comps[2].split(':')
# each line can have any number of
# optional parameters, we use the
# location of the separator field to
# determine the location of the elements
# after it.
_sep = comps.index('-')
device_name = comps[_sep + 2]
device_uuid = None
device_label = None
if device_name:
device_uuid = blkid_info.get(device_name, {}).get('UUID')
device_uuid = device_uuid and device_uuid.lower()
device_label = blkid_info.get(device_name, {}).get('LABEL')
ret[comps[4]] = {'mountid': comps[0],
'parentid': comps[1],
'major': device[0],
'minor': device[1],
'root': comps[3],
'opts': comps[5].split(','),
'fstype': comps[_sep + 1],
'device': device_name,
'alt_device': _list.get(comps[4], None),
'superopts': comps[_sep + 3].split(','),
'device_uuid': device_uuid,
'device_label': device_label}
return ret
def _active_mounts(ret):
'''
List active mounts on Linux systems
'''
_list = _list_mounts()
filename = '/proc/self/mounts'
if not os.access(filename, os.R_OK):
msg = 'File not readable {0}'
raise CommandExecutionError(msg.format(filename))
with salt.utils.fopen(filename) as ifile:
for line in ifile:
comps = line.split()
ret[comps[1]] = {'device': comps[0],
'alt_device': _list.get(comps[1], None),
'fstype': comps[2],
'opts': comps[3].split(',')}
return ret
def _active_mounts_freebsd(ret):
'''
List active mounts on FreeBSD systems
'''
for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': comps[3].split(',')}
return ret
def _active_mounts_solaris(ret):
'''
List active mounts on Solaris systems
'''
for line in __salt__['cmd.run_stdout']('mount -v').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
ret[comps[2]] = {'device': comps[0],
'fstype': comps[4],
'opts': comps[5].split('/')}
return ret
def _active_mounts_openbsd(ret):
'''
List active mounts on OpenBSD systems
'''
for line in __salt__['cmd.run_stdout']('mount -v').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
nod = __salt__['cmd.run_stdout']('ls -l {0}'.format(comps[0]))
nod = ' '.join(nod.split()).split(" ")
parens = re.findall(r'\((.*?)\)', line, re.DOTALL)
if len(parens) > 1:
ret[comps[3]] = {'device': comps[0],
'fstype': comps[5],
'opts': parens[1].split(", "),
'major': str(nod[4].strip(",")),
'minor': str(nod[5]),
'device_uuid': parens[0]}
else:
ret[comps[2]] = {'device': comps[0],
'fstype': comps[4],
'opts': parens[0].split(", ")}
return ret
def _active_mounts_darwin(ret):
'''
List active mounts on Mac OS systems
'''
for line in __salt__['cmd.run_stdout']('mount').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
parens = re.findall(r'\((.*?)\)', line, re.DOTALL)[0].split(", ")
ret[comps[2]] = {'device': comps[0],
'fstype': parens[0],
'opts': parens[1:]}
return ret
def active(extended=False):
'''
List the active mounts.
CLI Example:
.. code-block:: bash
salt '*' mount.active
'''
ret = {}
if __grains__['os'] == 'FreeBSD':
_active_mounts_freebsd(ret)
elif __grains__['os'] == 'Solaris':
_active_mounts_solaris(ret)
elif __grains__['os'] == 'OpenBSD':
_active_mounts_openbsd(ret)
elif __grains__['os'] in ['MacOS', 'Darwin']:
_active_mounts_darwin(ret)
else:
if extended:
try:
_active_mountinfo(ret)
except CommandExecutionError:
_active_mounts(ret)
else:
_active_mounts(ret)
return ret
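# Sketch of the structure returned by active() for a hypothetical mount
# (illustrative only; actual keys depend on the platform branch taken above):
#   {'/mnt/data': {'device': '/dev/sdb1', 'alt_device': '/dev/sdb1',
#                  'fstype': 'ext4', 'opts': ['rw', 'relatime']}}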
class _fstab_entry(object):
'''
Utility class for manipulating fstab entries. Primarily we're parsing,
formatting, and comparing lines. Parsing emits dicts expected from
fstab() or raises a ValueError.
Note: We'll probably want to use os.normpath and os.normcase on 'name'
'''
class ParseError(ValueError):
        '''Error raised when a line isn't parsable as an fstab entry'''
fstab_keys = ('device', 'name', 'fstype', 'opts', 'dump', 'pass_num')
# preserve data format
compatibility_keys = ('device', 'name', 'fstype', 'opts', 'dump', 'pass')
fstab_format = '{device}\t\t{name}\t{fstype}\t{opts}\t{dump} {pass_num}\n'
@classmethod
def dict_from_line(cls, line, keys=fstab_keys):
if len(keys) != 6:
raise ValueError('Invalid key array: {0}'.format(keys))
if line.startswith('#'):
raise cls.ParseError("Comment!")
comps = line.split()
if len(comps) != 6:
raise cls.ParseError("Invalid Entry!")
return dict(zip(keys, comps))
@classmethod
def from_line(cls, *args, **kwargs):
        return cls(**cls.dict_from_line(*args, **kwargs))
@classmethod
def dict_to_line(cls, entry):
return cls.fstab_format.format(**entry)
def __str__(self):
'''string value, only works for full repr'''
return self.dict_to_line(self.criteria)
def __repr__(self):
'''always works'''
return str(self.criteria)
def pick(self, keys):
'''returns an instance with just those keys'''
subset = dict([(key, self.criteria[key]) for key in keys])
return self.__class__(**subset)
def __init__(self, **criteria):
'''Store non-empty, non-null values to use as filter'''
items = [key_value for key_value in six.iteritems(criteria) if key_value[1] is not None]
items = [(key_value1[0], str(key_value1[1])) for key_value1 in items]
self.criteria = dict(items)
@staticmethod
def norm_path(path):
'''Resolve equivalent paths equivalently'''
return os.path.normcase(os.path.normpath(path))
def match(self, line):
'''compare potentially partial criteria against line'''
entry = self.dict_from_line(line)
for key, value in six.iteritems(self.criteria):
if entry[key] != value:
return False
return True
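# Illustrative sketch (not part of the upstream module): round-trips a single
# hypothetical fstab line through _fstab_entry.
def _example_fstab_entry_roundtrip():
    line = '/dev/sda1\t\t/data\text4\tdefaults\t0 2\n'
    entry = _fstab_entry.dict_from_line(line)
    # entry == {'device': '/dev/sda1', 'name': '/data', 'fstype': 'ext4',
    #           'opts': 'defaults', 'dump': '0', 'pass_num': '2'}
    # dict_to_line() re-serializes it using fstab_format.
    return _fstab_entry.dict_to_line(entry)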
def fstab(config='/etc/fstab'):
'''
List the contents of the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.fstab
'''
ret = {}
if not os.path.isfile(config):
return ret
with salt.utils.fopen(config) as ifile:
for line in ifile:
try:
entry = _fstab_entry.dict_from_line(
line,
_fstab_entry.compatibility_keys)
entry['opts'] = entry['opts'].split(',')
ret[entry.pop('name')] = entry
except _fstab_entry.ParseError:
pass
return ret
def rm_fstab(name, device, config='/etc/fstab'):
'''
Remove the mount point from the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.rm_fstab /mnt/foo /dev/sdg
'''
modified = False
criteria = _fstab_entry(name=name, device=device)
lines = []
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
try:
if criteria.match(line):
modified = True
else:
lines.append(line)
except _fstab_entry.ParseError:
lines.append(line)
except (IOError, OSError) as exc:
msg = "Couldn't read from {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
if modified:
try:
with salt.utils.fopen(config, 'w+') as ofile:
ofile.writelines(lines)
except (IOError, OSError) as exc:
msg = "Couldn't write to {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
# Note: not clear why we always return 'True'
# --just copying previous behavior at this point...
return True
def set_fstab(
name,
device,
fstype,
opts='defaults',
dump=0,
pass_num=0,
config='/etc/fstab',
test=False,
match_on='auto',
**kwargs):
'''
Verify that this mount is represented in the fstab, change the mount
to match the data passed, or add the mount if it is not present.
CLI Example:
.. code-block:: bash
salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
'''
# Fix the opts type if it is a list
if isinstance(opts, list):
opts = ','.join(opts)
# preserve arguments for updating
entry_args = {
'name': name,
'device': device,
'fstype': fstype,
'opts': opts,
'dump': dump,
'pass_num': pass_num,
}
lines = []
ret = None
# Transform match_on into list--items will be checked later
if isinstance(match_on, list):
pass
elif not isinstance(match_on, six.string_types):
msg = 'match_on must be a string or list of strings'
raise CommandExecutionError(msg)
elif match_on == 'auto':
# Try to guess right criteria for auto....
# NOTE: missing some special fstypes here
specialFSes = frozenset([
'none',
'tmpfs',
'sysfs',
'proc',
'fusectl',
'debugfs',
'securityfs',
'devtmpfs',
'cgroup'])
if fstype in specialFSes:
match_on = ['name']
else:
match_on = ['device']
else:
match_on = [match_on]
# generate entry and criteria objects, handle invalid keys in match_on
entry = _fstab_entry(**entry_args)
try:
criteria = entry.pick(match_on)
except KeyError:
filterFn = lambda key: key not in _fstab_entry.fstab_keys
invalid_keys = filter(filterFn, match_on)
msg = 'Unrecognized keys in match_on: "{0}"'.format(invalid_keys)
raise CommandExecutionError(msg)
# parse file, use ret to cache status
if not os.path.isfile(config):
raise CommandExecutionError('Bad config file "{0}"'.format(config))
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
try:
if criteria.match(line):
# Note: If ret isn't None here,
# we've matched multiple lines
ret = 'present'
if entry.match(line):
lines.append(line)
else:
ret = 'change'
lines.append(str(entry))
else:
lines.append(line)
except _fstab_entry.ParseError:
lines.append(line)
except (IOError, OSError) as exc:
msg = 'Couldn\'t read from {0}: {1}'
raise CommandExecutionError(msg.format(config, str(exc)))
# add line if not present or changed
if ret is None:
lines.append(str(entry))
ret = 'new'
if ret != 'present': # ret in ['new', 'change']:
if not salt.utils.test_mode(test=test, **kwargs):
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
msg = 'File not writable {0}'
raise CommandExecutionError(msg.format(config))
return ret
def rm_automaster(name, device, config='/etc/auto_salt'):
'''
Remove the mount point from the auto_master
CLI Example:
.. code-block:: bash
salt '*' mount.rm_automaster /mnt/foo /dev/sdg
'''
contents = automaster(config)
if name not in contents:
return True
# The entry is present, get rid of it
lines = []
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
lines.append(line)
continue
comps = line.split()
prefix = "/.."
name_chk = comps[0].replace(prefix, "")
device_fmt = comps[2].split(":")
if device:
if name_chk == name and device_fmt[1] == device:
continue
else:
if name_chk == name:
continue
lines.append(line)
except (IOError, OSError) as exc:
msg = "Couldn't read from {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
try:
with salt.utils.fopen(config, 'w+') as ofile:
ofile.writelines(lines)
except (IOError, OSError) as exc:
msg = "Couldn't write to {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
# Update automount
__salt__['cmd.run']('automount -cv')
return True
def set_automaster(
name,
device,
fstype,
opts='',
config='/etc/auto_salt',
test=False,
**kwargs):
'''
Verify that this mount is represented in the auto_salt, change the mount
to match the data passed, or add the mount if it is not present.
CLI Example:
.. code-block:: bash
salt '*' mount.set_automaster /mnt/foo /dev/sdz1 ext4
'''
# Fix the opts type if it is a list
if isinstance(opts, list):
opts = ','.join(opts)
lines = []
change = False
present = False
automaster_file = "/etc/auto_master"
if not os.path.isfile(config):
__salt__['file.touch'](config)
__salt__['file.append'](automaster_file, "/-\t\t\t{0}".format(config))
name = "/..{0}".format(name)
device_fmt = "{0}:{1}".format(fstype, device)
type_opts = "-fstype={0},{1}".format(fstype, opts)
if fstype == 'smbfs':
device_fmt = device_fmt.replace(fstype, "")
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
lines.append(line)
continue
if comps[0] == name or comps[2] == device_fmt:
# check to see if there are changes
# and fix them if there are any
present = True
if comps[0] != name:
change = True
comps[0] = name
if comps[1] != type_opts:
change = True
comps[1] = type_opts
if comps[2] != device_fmt:
change = True
comps[2] = device_fmt
if change:
log.debug(
'auto_master entry for mount point {0} needs to be '
'updated'.format(name)
)
newline = (
'{0}\t{1}\t{2}\n'.format(
name, type_opts, device_fmt)
)
lines.append(newline)
else:
lines.append(line)
except (IOError, OSError) as exc:
msg = 'Couldn\'t read from {0}: {1}'
raise CommandExecutionError(msg.format(config, str(exc)))
if change:
if not salt.utils.test_mode(test=test, **kwargs):
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
msg = 'File not writable {0}'
raise CommandExecutionError(msg.format(config))
return 'change'
if not change:
if present:
# The right entry is already here
return 'present'
else:
if not salt.utils.test_mode(test=test, **kwargs):
# The entry is new, add it to the end of the fstab
newline = (
'{0}\t{1}\t{2}\n'.format(
name, type_opts, device_fmt)
)
lines.append(newline)
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
raise CommandExecutionError(
'File not writable {0}'.format(
config
)
)
return 'new'
def automaster(config='/etc/auto_salt'):
'''
List the contents of the auto master
CLI Example:
.. code-block:: bash
salt '*' mount.automaster
'''
ret = {}
if not os.path.isfile(config):
return ret
with salt.utils.fopen(config) as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
continue
if not line.strip():
# Blank line
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
continue
prefix = "/.."
name = comps[0].replace(prefix, "")
device_fmt = comps[2].split(":")
opts = comps[1].split(',')
ret[name] = {'device': device_fmt[1],
'fstype': opts[0],
'opts': opts[1:]}
return ret
def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
'''
Mount a device
CLI Example:
.. code-block:: bash
salt '*' mount.mount /mnt/foo /dev/sdz1 True
'''
# Darwin doesn't expect defaults when mounting without other options
if 'defaults' in opts and __grains__['os'] in ['MacOS', 'Darwin']:
opts = None
if isinstance(opts, six.string_types):
opts = opts.split(',')
if not os.path.exists(name) and mkmnt:
__salt__['file.mkdir'](name, user=user)
args = ''
if opts is not None:
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
args += ' -t {0}'.format(fstype)
cmd = 'mount {0} {1} {2} '.format(args, device, name)
out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
if out['retcode']:
return out['stderr']
return True
def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
'''
    Attempt to remount a device; if the device is not already mounted, mount
    is called
CLI Example:
.. code-block:: bash
salt '*' mount.remount /mnt/foo /dev/sdz1 True
'''
force_mount = False
if __grains__['os'] in ['MacOS', 'Darwin']:
if opts == 'defaults':
opts = 'noowners'
if fstype == 'smbfs':
force_mount = True
if isinstance(opts, six.string_types):
opts = opts.split(',')
mnts = active()
if name in mnts:
# The mount point is mounted, attempt to remount it with the given data
if 'remount' not in opts and __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin']:
opts.append('remount')
if force_mount:
# We need to force the mount but first we should unmount
umount(name, device, user=user)
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
args += ' -t {0}'.format(fstype)
if __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin'] or force_mount:
cmd = 'mount {0} {1} {2} '.format(args, device, name)
else:
cmd = 'mount -u {0} {1} {2} '.format(args, device, name)
out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
if out['retcode']:
return out['stderr']
return True
# Mount a filesystem that isn't already
return mount(name, device, mkmnt, fstype, opts, user=user)
def umount(name, device=None, user=None):
'''
Attempt to unmount a device by specifying the directory it is mounted on
CLI Example:
.. code-block:: bash
salt '*' mount.umount /mnt/foo
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' mount.umount /mnt/foo /dev/xvdc1
'''
mnts = active()
if name not in mnts:
return "{0} does not have anything mounted".format(name)
if not device:
cmd = 'umount {0}'.format(name)
else:
cmd = 'umount {0}'.format(device)
out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
if out['retcode']:
return out['stderr']
return True
def is_fuse_exec(cmd):
'''
Returns true if the command passed is a fuse mountable application.
CLI Example:
.. code-block:: bash
salt '*' mount.is_fuse_exec sshfs
'''
cmd_path = _which(cmd)
# No point in running ldd on a command that doesn't exist
if not cmd_path:
return False
elif not _which('ldd'):
raise CommandNotFoundError('ldd')
out = __salt__['cmd.run']('ldd {0}'.format(cmd_path), python_shell=False)
return 'libfuse' in out
def swaps():
'''
Return a dict containing information on active swap
CLI Example:
.. code-block:: bash
salt '*' mount.swaps
'''
ret = {}
if __grains__['os'] != 'OpenBSD':
with salt.utils.fopen('/proc/swaps') as fp_:
for line in fp_:
if line.startswith('Filename'):
continue
comps = line.split()
ret[comps[0]] = {'type': comps[1],
'size': comps[2],
'used': comps[3],
'priority': comps[4]}
else:
for line in __salt__['cmd.run_stdout']('swapctl -kl').splitlines():
if line.startswith(('Device', 'Total')):
continue
swap_type = "file"
comps = line.split()
if comps[0].startswith('/dev/'):
swap_type = "partition"
ret[comps[0]] = {'type': swap_type,
'size': comps[1],
'used': comps[2],
'priority': comps[5]}
return ret
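# Hypothetical /proc/swaps line (sketch) and the entry it produces:
#   /dev/sda2  partition  2097148  0  -1
# yields ret['/dev/sda2'] = {'type': 'partition', 'size': '2097148',
#                            'used': '0', 'priority': '-1'}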
def swapon(name, priority=None):
'''
Activate a swap disk
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile
'''
ret = {}
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = False
return ret
cmd = 'swapon {0}'.format(name)
if priority:
cmd += ' -p {0}'.format(priority)
__salt__['cmd.run'](cmd, python_shell=False)
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = True
return ret
return ret
def swapoff(name):
'''
Deactivate a named swap mount
CLI Example:
.. code-block:: bash
salt '*' mount.swapoff /root/swapfile
'''
on_ = swaps()
if name in on_:
if __grains__['os'] != 'OpenBSD':
__salt__['cmd.run']('swapoff {0}'.format(name), python_shell=False)
else:
__salt__['cmd.run']('swapctl -d {0}'.format(name),
python_shell=False)
on_ = swaps()
if name in on_:
return False
return True
return None
def is_mounted(name):
'''
.. versionadded:: 2014.7.0
Provide information if the path is mounted
CLI Example:
.. code-block:: bash
salt '*' mount.is_mounted /mnt/share
'''
active_ = active()
if name in active_:
return True
else:
return False
|
|
# Copyright 2016 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from yabgp.common import constants as bgp_cons
from yabgp.message.attribute.nlri.evpn import EVPN
class TestEVPN(unittest.TestCase):
def test_parse_mac_ip_adv(self):
data_hex = b'\x02\x25\x00\x01\xac\x11\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x6c\x30\x00\x11\x22\x33\x44\x55\x20\x0b\x0b\x0b\x01\x00\x00\x00'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_MAC_IP_ADVERTISEMENT,
'value': {
'rd': '172.17.0.3:2',
'mac': '00-11-22-33-44-55',
'eth_tag_id': 108,
'esi': 0,
'ip': '11.11.11.1',
'label': [0]}
}]
self.assertEqual(data_list, EVPN.parse(data_hex))
def test_construct_mac_ip_adv(self):
data_hex = b'\x02\x25\x00\x01\xac\x11\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x6c\x30\x00\x11\x22\x33\x44\x55\x20\x0b\x0b\x0b\x01\x00\x00\x00'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_MAC_IP_ADVERTISEMENT,
'value': {
'rd': '172.17.0.3:2',
'mac': '00-11-22-33-44-55',
'eth_tag_id': 108,
'esi': 0,
'ip': '11.11.11.1',
'label': [0]}
}]
self.assertEqual(data_hex, EVPN.construct(data_list))
def test_parse_eth_auto_dis(self):
data_hex = b'\x01\x19\x00\x01\x01\x01\x01\x01\x80\x63\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x64\x00\x00\xa1'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_ETHERNET_AUTO_DISCOVERY,
'value': {
'rd': '1.1.1.1:32867',
'esi': 0,
'eth_tag_id': 100,
'label': [10]
}
}]
self.assertEqual(data_list, EVPN.parse(data_hex))
def test_construct_eth_auto_dis(self):
data_hex = b'\x01\x19\x00\x01\x01\x01\x01\x01\x80\x63\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x64\x00\x00\xa1'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_ETHERNET_AUTO_DISCOVERY,
'value': {
'rd': '1.1.1.1:32867',
'esi': 0,
'eth_tag_id': 100,
'label': [10]
}
}]
self.assertEqual(data_hex, EVPN.construct(data_list))
def test_parse_in_mul_eth_tag(self):
data_hex = b'\x03\x11\x00\x01\xac\x10\x00\x01\x17\x10\x00\x00\x00\x64\x20\xc0\xa8\x00\x01'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_INCLUSIVE_MULTICAST_ETHERNET_TAG,
'value': {
'rd': '172.16.0.1:5904',
'eth_tag_id': 100,
'ip': '192.168.0.1'
}
}]
self.assertEqual(data_list, EVPN.parse(data_hex))
def test_construct_in_mul_eth_tag(self):
data_hex = b'\x03\x11\x00\x01\xac\x10\x00\x01\x17\x10\x00\x00\x00\x64\x20\xc0\xa8\x00\x01'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_INCLUSIVE_MULTICAST_ETHERNET_TAG,
'value': {
'rd': '172.16.0.1:5904',
'eth_tag_id': 100,
'ip': '192.168.0.1'
}
}]
self.assertEqual(data_hex, EVPN.construct(data_list))
def test_parse_eth_segment(self):
data_hex = b'\x04\x17\x00\x01\xac\x10\x00\x01\x17\x10\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x20\xc0\xa8\x00\x01'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_ETHERNET_SEGMENT,
'value': {
'rd': '172.16.0.1:5904',
'esi': 0,
'ip': '192.168.0.1'
}
}]
self.assertEqual(data_list, EVPN.parse(data_hex))
def test_construct_eth_segment(self):
data_hex = b'\x04\x17\x00\x01\xac\x10\x00\x01\x17\x10\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x20\xc0\xa8\x00\x01'
data_list = [{
'type': bgp_cons.BGPNLRI_EVPN_ETHERNET_SEGMENT,
'value': {
'rd': '172.16.0.1:5904',
'esi': 0,
'ip': '192.168.0.1'
}
}]
self.assertEqual(data_hex, EVPN.construct(data_list))
def test_parse_ip_route_prefix_v4(self):
data_hex = b'\x05\x22\x00\x02\x00\x01\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x01\x18\x01\x01\x01\x00\x01\x01\x01\x01\x00\x00\xa1'
data_list = [{
'type': 5,
'value': {
'esi': 0,
'eth_tag_id': 1,
'gateway': '1.1.1.1',
'label': [10],
'prefix': '1.1.1.0/24',
'rd': '65536:2'}}]
self.assertEqual(data_list, EVPN.parse(data_hex))
def test_construct_ip_route_prefix_v4(self):
data_hex = b'\x05\x22\x00\x02\x00\x01\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x01\x18\x01\x01\x01\x00\x01\x01\x01\x01\x00\x00\xa1'
data_list = [{
'type': 5,
'value': {
'esi': 0,
'eth_tag_id': 1,
'gateway': '1.1.1.1',
'label': [10],
'prefix': '1.1.1.0/24',
'rd': '65536:2'}}]
self.assertEqual(data_hex, EVPN.construct(data_list))
def test_parse_ip_route_prefix_v6(self):
data_hex = b'\x05' \
b'\x3a' \
b'\x00\x02\x00\x01\x00\x00\x00\x02' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x01' \
b'\x40' \
b'\x20\x01\x32\x32\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01' \
b'\x20\x01\x32\x32\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01' \
b'\x00\x00\xa1'
data_list = [{
'type': 5,
'value': {
'esi': 0,
'eth_tag_id': 1,
'gateway': '2001:3232::1',
'label': [10],
'prefix': '2001:3232::1/64',
'rd': '65536:2'}}]
self.assertEqual(data_list, EVPN.parse(data_hex))
def test_construct_ip_route_prefix_v6(self):
data_hex = b'\x05' \
b'\x3a' \
b'\x00\x02\x00\x01\x00\x00\x00\x02' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x01' \
b'\x40' \
b'\x20\x01\x32\x32\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01' \
b'\x20\x01\x32\x32\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01' \
b'\x00\x00\xa1'
data_list = [{
'type': 5,
'value': {
'esi': 0,
'eth_tag_id': 1,
'gateway': '2001:3232::1',
'label': [10],
'prefix': '2001:3232::1/64',
'rd': '65536:2'}}]
self.assertEqual(data_hex, EVPN.construct(data_list))
if __name__ == '__main__':
unittest.main()
|
|
"""
This is a reference LSTM numpy implementation adapted from Karpathy's code:
The adaptation includes
- interface to use the same initialization values
- being able to read out intermediate values to compare with another LSTM
implementation
"""
import numpy as np
class LSTM:
@staticmethod
def init(input_size, hidden_size):
"""
Initialize parameters of the LSTM (both weights and biases in one matrix)
to be ones
"""
a = input_size + hidden_size + 1
b = 4 * hidden_size
# c = np.sqrt(input_size + hidden_size)
WLSTM = np.ones((a, b))
return WLSTM
@staticmethod
def forward(X, WLSTM, c0=None, h0=None):
"""
X should be of shape (n,b,input_size), where n = length of sequence, b = batch size
"""
n, b, input_size = X.shape
d = WLSTM.shape[1] / 4 # hidden size
if c0 is None:
c0 = np.zeros((b, d))
if h0 is None:
h0 = np.zeros((b, d))
# Perform the LSTM forward pass with X as the input
xphpb = WLSTM.shape[0] # x plus h plus bias, lol
# input [1, xt, ht-1] to each tick of the LSTM
Hin = np.zeros((n, b, xphpb))
# hidden representation of the LSTM (gated cell content)
Hout = np.zeros((n, b, d))
IFOG = np.zeros((n, b, d * 4)) # input, forget, output, gate (IFOG)
IFOGf = np.zeros((n, b, d * 4)) # after nonlinearity
C = np.zeros((n, b, d)) # cell content
Ct = np.zeros((n, b, d)) # tanh of cell content
for t in xrange(n):
# concat [x,h] as input to the LSTM
prevh = Hout[t - 1] if t > 0 else h0
Hin[t, :, 0] = 1 # bias
Hin[t, :, 1:input_size + 1] = X[t]
Hin[t, :, input_size + 1:] = prevh
# compute all gate activations. dots: (most work is this line)
IFOG[t] = Hin[t].dot(WLSTM)
# non-linearities
# sigmoids; these are the gates
IFOGf[t, :, :3 * d] = 1.0 / (1.0 + np.exp(-IFOG[t, :, :3 * d]))
IFOGf[t, :, 3 * d:] = np.tanh(IFOG[t, :, 3 * d:]) # tanh
# compute the cell activation
prevc = C[t - 1] if t > 0 else c0
C[t] = IFOGf[t, :, :d] * IFOGf[t, :, 3 * d:] + \
IFOGf[t, :, d:2 * d] * prevc
Ct[t] = np.tanh(C[t])
Hout[t] = IFOGf[t, :, 2 * d:3 * d] * Ct[t]
cache = {}
cache['WLSTM'] = WLSTM
cache['Hout'] = Hout
cache['IFOGf'] = IFOGf
cache['IFOG'] = IFOG
cache['C'] = C
cache['Ct'] = Ct
cache['Hin'] = Hin
cache['c0'] = c0
cache['h0'] = h0
# return C[t], as well so we can continue LSTM with prev state init if
# needed
return Hout, C[t], Hout[t], cache
@staticmethod
def backward(dHout_in, cache, dcn=None, dhn=None):
WLSTM = cache['WLSTM']
Hout = cache['Hout']
IFOGf = cache['IFOGf']
IFOG = cache['IFOG']
C = cache['C']
Ct = cache['Ct']
Hin = cache['Hin']
c0 = cache['c0']
# h0 = cache['h0']
n, b, d = Hout.shape
input_size = WLSTM.shape[0] - d - 1 # -1 due to bias
# backprop the LSTM
dIFOG = np.zeros(IFOG.shape)
dIFOGf = np.zeros(IFOGf.shape)
dWLSTM = np.zeros(WLSTM.shape)
dHin = np.zeros(Hin.shape)
dC = np.zeros(C.shape)
dX = np.zeros((n, b, input_size))
dh0 = np.zeros((b, d))
dc0 = np.zeros((b, d))
dHout = dHout_in.copy() # make a copy so we don't have any funny side effects
if dcn is not None:
dC[n - 1] += dcn.copy() # carry over gradients from later
if dhn is not None:
dHout[n - 1] += dhn.copy()
for t in reversed(xrange(n)):
tanhCt = Ct[t]
dIFOGf[t, :, 2 * d:3 * d] = tanhCt * dHout[t]
# backprop tanh non-linearity first then continue backprop
dC[t] += (1 - tanhCt ** 2) * (IFOGf[t, :, 2 * d:3 * d] * dHout[t])
if t > 0:
dIFOGf[t, :, d:2 * d] = C[t - 1] * dC[t]
dC[t - 1] += IFOGf[t, :, d:2 * d] * dC[t]
else:
dIFOGf[t, :, d:2 * d] = c0 * dC[t]
dc0 = IFOGf[t, :, d:2 * d] * dC[t]
dIFOGf[t, :, :d] = IFOGf[t, :, 3 * d:] * dC[t]
dIFOGf[t, :, 3 * d:] = IFOGf[t, :, :d] * dC[t]
# backprop activation functions
dIFOG[t, :, 3 * d:] = (1 - IFOGf[t, :, 3 * d:] **
2) * dIFOGf[t, :, 3 * d:]
y = IFOGf[t, :, :3 * d]
dIFOG[t, :, :3 * d] = (y * (1.0 - y)) * dIFOGf[t, :, :3 * d]
# backprop matrix multiply
dWLSTM += np.dot(Hin[t].transpose(), dIFOG[t])
dHin[t] = dIFOG[t].dot(WLSTM.transpose())
# backprop the identity transforms into Hin
dX[t] = dHin[t, :, 1:input_size + 1]
if t > 0:
dHout[t - 1, :] += dHin[t, :, input_size + 1:]
else:
dh0 += dHin[t, :, input_size + 1:]
# for debugging
# hidden_size = WLSTM.shape[0] - input_size - 1
# dWrecur = dWLSTM[-hidden_size:, :]
# dWinput = dWLSTM[1:input_size + 1, :]
# db = dWLSTM[0, :]
return dX, dWLSTM, dc0, dh0
@staticmethod
def runBatchFpropWithGivenInput(hidden_size, X):
"""
        run the LSTM model through the given input data. The data has dimension
        (seq_len, batch_size, input_size)
"""
# seq_len = X.shape[0]
# batch_size = X.shape[1]
input_size = X.shape[2]
WLSTM = LSTM.init(input_size, hidden_size)
# batch forward
Hout, cprev, hprev, batch_cache = LSTM.forward(X, WLSTM)
IFOGf = batch_cache['IFOGf']
Ct = batch_cache['Ct']
return Hout, IFOGf, Ct, batch_cache
@staticmethod
def runBatchBpropWithGivenDelta(hidden_size, batch_cache, delta):
"""
run the LSTM model through the given input errors. The data has dimension
(seq_len, batch_size, hidden_size)
"""
dH = delta
# get the batched version gradients
dX, dWLSTM, dc0, dh0 = LSTM.backward(dH, batch_cache)
input_size = dWLSTM.shape[0] - hidden_size - 1
dWrecur = dWLSTM[-hidden_size:, :]
dWinput = dWLSTM[1:input_size + 1, :]
db = dWLSTM[0, :]
return dX, dWrecur, dWinput, db, dWLSTM
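# Illustrative sketch (not part of the original reference code): a tiny
# forward pass with the all-ones weights from LSTM.init, using the
# (seq_len, batch, input_size) layout assumed by LSTM.forward.
def _example_tiny_forward():
    input_size, hidden_size = 3, 2
    X = np.ones((4, 1, input_size))  # seq_len=4, batch=1
    WLSTM = LSTM.init(input_size, hidden_size)
    Hout, c_last, h_last, _ = LSTM.forward(X, WLSTM)
    # Hout has shape (4, 1, hidden_size); c_last/h_last are the final states.
    return Hout, c_last, h_last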
# -------------------
# TEST CASES
# -------------------
def checkSequentialMatchesBatch():
""" check LSTM I/O forward/backward interactions """
n, b, d = (5, 3, 4) # sequence length, batch size, hidden size
input_size = 10
WLSTM = LSTM.init(input_size, d) # input size, hidden size
X = np.random.randn(n, b, input_size)
h0 = np.random.randn(b, d)
c0 = np.random.randn(b, d)
# sequential forward
cprev = c0
hprev = h0
caches = [{} for t in xrange(n)]
Hcat = np.zeros((n, b, d))
for t in xrange(n):
xt = X[t:t + 1]
_, cprev, hprev, cache = LSTM.forward(xt, WLSTM, cprev, hprev)
caches[t] = cache
Hcat[t] = hprev
# sanity check: perform batch forward to check that we get the same thing
H, _, _, batch_cache = LSTM.forward(X, WLSTM, c0, h0)
    assert np.allclose(H, Hcat), "Sequential and Batch forward don't match!"
# eval loss
wrand = np.random.randn(*Hcat.shape)
# loss = np.sum(Hcat * wrand)
dH = wrand
# get the batched version gradients
BdX, BdWLSTM, Bdc0, Bdh0 = LSTM.backward(dH, batch_cache)
# now perform sequential backward
dX = np.zeros_like(X)
dWLSTM = np.zeros_like(WLSTM)
dc0 = np.zeros_like(c0)
dh0 = np.zeros_like(h0)
dcnext = None
dhnext = None
for t in reversed(xrange(n)):
dht = dH[t].reshape(1, b, d)
dx, dWLSTMt, dcprev, dhprev = LSTM.backward(
dht, caches[t], dcnext, dhnext)
dhnext = dhprev
dcnext = dcprev
dWLSTM += dWLSTMt # accumulate LSTM gradient
dX[t] = dx[0]
if t == 0:
dc0 = dcprev
dh0 = dhprev
# and make sure the gradients match
print 'Making sure batched version agrees with sequential version: (should all be True)'
print np.allclose(BdX, dX)
print np.allclose(BdWLSTM, dWLSTM)
print np.allclose(Bdc0, dc0)
print np.allclose(Bdh0, dh0)
def checkBatchGradient():
""" check that the batch gradient is correct """
# lets gradient check this beast
n, b, d = (5, 3, 4) # sequence length, batch size, hidden size
input_size = 10
WLSTM = LSTM.init(input_size, d) # input size, hidden size
X = np.random.randn(n, b, input_size)
h0 = np.random.randn(b, d)
c0 = np.random.randn(b, d)
# batch forward backward
H, Ct, Ht, cache = LSTM.forward(X, WLSTM, c0, h0)
wrand = np.random.randn(*H.shape)
# loss = np.sum(H * wrand) # weighted sum is a nice hash to use I think
dH = wrand
dX, dWLSTM, dc0, dh0 = LSTM.backward(dH, cache)
def fwd():
h, _, _, _ = LSTM.forward(X, WLSTM, c0, h0)
return np.sum(h * wrand)
# now gradient check all
delta = 1e-5
rel_error_thr_warning = 1e-2
rel_error_thr_error = 1
tocheck = [X, WLSTM, c0, h0]
grads_analytic = [dX, dWLSTM, dc0, dh0]
names = ['X', 'WLSTM', 'c0', 'h0']
for j in xrange(len(tocheck)):
mat = tocheck[j]
dmat = grads_analytic[j]
name = names[j]
# gradcheck
for i in xrange(mat.size):
old_val = mat.flat[i]
mat.flat[i] = old_val + delta
loss0 = fwd()
mat.flat[i] = old_val - delta
loss1 = fwd()
mat.flat[i] = old_val
grad_analytic = dmat.flat[i]
grad_numerical = (loss0 - loss1) / (2 * delta)
if grad_numerical == 0 and grad_analytic == 0:
rel_error = 0 # both are zero, OK.
status = 'OK'
elif abs(grad_numerical) < 1e-7 and abs(grad_analytic) < 1e-7:
rel_error = 0 # not enough precision to check this
status = 'VAL SMALL WARNING'
else:
rel_error = (abs(grad_analytic - grad_numerical) /
abs(grad_numerical + grad_analytic))
status = 'OK'
if rel_error > rel_error_thr_warning:
status = 'WARNING'
if rel_error > rel_error_thr_error:
status = '!!!!! NOTOK'
# print stats
            print(('%s checking param %s index %s (val = %+8f), analytic = %+8f, '
                   'numerical = %+8f, relative error = %+8f')
                  % (status, name, repr(np.unravel_index(i, mat.shape)), old_val,
                     grad_analytic, grad_numerical, rel_error))
if __name__ == "__main__":
checkSequentialMatchesBatch()
raw_input('check OK, press key to continue to gradient check')
checkBatchGradient()
print 'every line should start with OK. Have a nice day!'
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
def conv2d_transpose(value, filter, output_shape, strides, padding="SAME",
name=None):
"""The transpose of `conv2d`.
  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]`.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.op_scope([value, filter, output_shape], name,
"conv2d_transpose") as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter")
if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
raise ValueError(
"input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[3], filter.get_shape()[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}"
.format(output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3], filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
name=name)
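# Example (hedged sketch, hypothetical shapes): for `value` of shape
# [batch, 8, 8, 16], `filter` of shape [3, 3, 32, 16], strides [1, 2, 2, 1]
# and padding "SAME", a valid `output_shape` is [batch, 16, 16, 32] -- the
# shape that a forward conv2d with the same filter and strides would map
# back down to [batch, 8, 8, 16].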
# pylint: disable=protected-access
def bias_add(value, bias, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, or `complex64`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.op_scope([value, bias], name, "BiasAdd") as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops._bias_add(value, bias, name=name)
ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape)
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.op_scope([features], name, "Relu6") as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops._relu6(features, name=name)
def softmax_cross_entropy_with_logits(logits, labels, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If using exclusive `labels`
(wherein one and only one class is true at a time), see
`sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
`logits` and `labels` must have the same shape `[batch_size, num_classes]`
and the same dtype (either `float32` or `float64`).
Args:
logits: Unscaled log probabilities.
labels: Each row `labels[i]` must be a valid probability distribution.
name: A name for the operation (optional).
Returns:
A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
softmax cross entropy loss.
"""
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
logits, labels, name=name)
return cost
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
  `logits` must have the shape `[batch_size, num_classes]`
  and the dtype (either `float32` or `float64`).
`labels` must have the shape `[batch_size]` and the dtype `int64`.
Args:
logits: Unscaled log probabilities.
labels: Each entry `labels[i]` must be an index in `[0, num_classes)`.
name: A name for the operation (optional).
Returns:
A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
softmax cross entropy loss.
"""
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
logits, labels, name=name)
return cost
@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
"""Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
logits_shape = op.inputs[0].get_shape()
input_shape = logits_shape.with_rank(2)
batch_size = input_shape[0]
# labels_shape
op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
return [tensor_shape.vector(batch_size.value), input_shape]
@ops.RegisterShape("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsShape(op):
"""Shape function for SoftmaxCrossEntropyWithLogits op."""
logits_shape = op.inputs[0].get_shape()
labels_shape = op.inputs[1].get_shape()
input_shape = logits_shape.merge_with(labels_shape).with_rank(2)
batch_size = input_shape[0]
return [tensor_shape.vector(batch_size.value), input_shape]
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: A list of ints that has length >= 4.
The size of the window for each dimension of the input tensor.
strides: A list of ints that has length >= 4.
The stride of the sliding window for each dimension of the
input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.op_scope([value], name, "AvgPool") as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops._avg_pool(value, ksize=ksize, strides=strides,
padding=padding,
data_format=data_format,
name=name)
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and
type `tf.float32`.
ksize: A list of ints that has length >= 4. The size of the window for
each dimension of the input tensor.
strides: A list of ints that has length >= 4. The stride of the sliding
window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with type `tf.float32`. The max pooled output tensor.
"""
with ops.op_scope([value], name, "MaxPool") as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops._max_pool(value, ksize=ksize, strides=strides,
padding=padding,
data_format=data_format,
name=name)
ops.RegisterShape("Relu")(common_shapes.unchanged_shape)
ops.RegisterShape("Relu6")(common_shapes.unchanged_shape)
ops.RegisterShape("Elu")(common_shapes.unchanged_shape)
ops.RegisterShape("Softplus")(common_shapes.unchanged_shape)
ops.RegisterShape("Softsign")(common_shapes.unchanged_shape)
@ops.RegisterShape("ReluGrad")
@ops.RegisterShape("Relu6Grad")
@ops.RegisterShape("EluGrad")
@ops.RegisterShape("SoftplusGrad")
@ops.RegisterShape("SoftsignGrad")
def _BinaryElementwiseShape(op):
"""Returns same shape as both inputs to op.
Args:
op: Input operation.
Returns:
Shape of both inputs to `op`.
"""
return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]
ops.RegisterShape("L2Loss")(common_shapes.scalar_shape)
ops.RegisterShape("LRN")(common_shapes.unchanged_shape_with_rank(4))
@ops.RegisterShape("LRNGrad")
def _LRNGradShape(op):
"""Shape function for LRNGrad op."""
in_grads_shape = op.inputs[0].get_shape().with_rank(4)
in_image_shape = op.inputs[1].get_shape().with_rank(4)
out_image_shape = op.inputs[2].get_shape().with_rank(4)
return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)]
ops.RegisterShape("Softmax")(
common_shapes.unchanged_shape_with_rank(2))
@ops.RegisterShape("InTopK")
def _InTopKShape(op):
"""Shape function for InTopK op."""
predictions_shape = op.inputs[0].get_shape().with_rank(2)
targets_shape = op.inputs[1].get_shape().with_rank(1)
batch_size = predictions_shape[0].merge_with(targets_shape[0])
return [tensor_shape.vector(batch_size.value)]
@ops.RegisterShape("TopK")
@ops.RegisterShape("TopKV2")
def _TopKShape(op):
"""Shape function for TopK and TopKV2 ops."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(1)
if len(op.inputs) >= 2:
k = tensor_util.constant_value(op.inputs[1])
else:
k = op.get_attr("k")
last = input_shape[-1].value
if last is not None and k is not None and last < k:
raise ValueError("input.shape %s must have last dimension >= k = %d" %
(input_shape, k))
output_shape = input_shape[:-1].concatenate([k])
return [output_shape, output_shape]
@ops.RegisterShape("BatchNormWithGlobalNormalization")
def _BatchNormShape(op):
"""Shape function for BatchNormWithGlobalNormalization op."""
input_shape = op.inputs[0].get_shape().with_rank(4)
mean_shape = op.inputs[1].get_shape().with_rank(1)
var_shape = op.inputs[2].get_shape().with_rank(1)
beta_shape = op.inputs[3].get_shape().with_rank(1)
gamma_shape = op.inputs[4].get_shape().with_rank(1)
mean_shape[0].merge_with(input_shape[3])
var_shape[0].merge_with(input_shape[3])
beta_shape[0].merge_with(input_shape[3])
gamma_shape[0].merge_with(input_shape[3])
return [input_shape]
@ops.RegisterShape("BatchNormWithGlobalNormalizationGrad")
def _BatchNormGradShape(op):
"""Shape function for BatchNormWithGlobalNormalizationGrad op."""
input_shape = op.inputs[0].get_shape().with_rank(4)
mean_shape = op.inputs[1].get_shape().with_rank(1)
var_shape = op.inputs[2].get_shape().with_rank(1)
beta_shape = op.inputs[3].get_shape().with_rank(1)
out_backprop_shape = op.inputs[4].get_shape().with_rank(4)
input_shape = input_shape.merge_with(out_backprop_shape)
vector_dim = input_shape[3]
vector_dim = vector_dim.merge_with(mean_shape[0])
vector_dim = vector_dim.merge_with(var_shape[0])
vector_dim = vector_dim.merge_with(beta_shape[0])
return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)
ops.RegisterShape("Conv2D")(common_shapes.conv2d_shape)
ops.RegisterShape("DepthwiseConv2dNative")(
common_shapes.depthwise_conv2d_native_shape)
ops.RegisterShape("AvgPool")(common_shapes.avg_pool_shape)
ops.RegisterShape("MaxPool")(common_shapes.max_pool_shape)
@ops.RegisterShape("MaxPoolWithArgmax")
def _MaxPoolWithArgMaxShape(op):
"""Shape function for MaxPoolWithArgmax op."""
return common_shapes.max_pool_shape(op) * 2
@ops.RegisterShape("AvgPoolGrad")
def _AvgPoolGradShape(op):
"""Shape function for the AvgPoolGrad op."""
orig_input_shape = tensor_util.constant_value(op.inputs[0])
if orig_input_shape is not None:
return [tensor_shape.TensorShape(orig_input_shape.tolist())]
else:
# NOTE(mrry): We could in principle work out the shape from the
# gradients and the attrs, but if we do not know orig_input_shape
# statically, then we are unlikely to know the shape of the
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("Conv2DBackpropFilter")
def _Conv2DBackpropFilterShape(op):
"""Shape function for the Conv2DBackpropFilter op."""
filter_shape = tensor_util.constant_value(op.inputs[1])
if filter_shape is not None:
return [tensor_shape.TensorShape(filter_shape.tolist())]
else:
# NOTE(mrry): We could in principle work out the shape from the
# gradients and the attrs, but if we do not know filter_shape
# statically, then we are unlikely to know the shape of the
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("Conv2DBackpropInput")
def _Conv2DBackpropInputShape(op):
"""Shape function for the Conv2DBackpropInput op."""
input_shape = tensor_util.constant_value(op.inputs[0])
if input_shape is not None:
return [tensor_shape.TensorShape(input_shape.tolist())]
else:
# NOTE(mrry): We could in principle work out the shape from the
# gradients and the attrs, but if we do not know input_shape
# statically, then we are unlikely to know the shape of the
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("MaxPoolGrad")
@ops.RegisterShape("MaxPoolGradWithArgmax")
def _MaxPoolGradShape(op):
"""Shape function for the MaxPoolGrad op."""
orig_input_shape = op.inputs[0].get_shape().with_rank(4)
return [orig_input_shape]
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (output_count * filter_in_depth * filter_height *
filter_width * 2))
@ops.RegisterStatistics("Conv2D", "weight_parameters")
def _calc_conv_weight_params(graph, node):
"""Calculates the on-disk size of the weights for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
filter_out_depth = int(filter_shape[3])
return ops.OpStats("weight_parameters", (filter_height * filter_width *
filter_in_depth * filter_out_depth))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@ops.RegisterStatistics("BiasAdd", "weight_parameters")
def _calc_bias_add_weight_params(graph, node):
"""Calculates the on-disk weight parameters for BiasAdd."""
bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
bias_shape.assert_is_fully_defined()
bias_count = np.prod(bias_shape.as_list())
return ops.OpStats("weight_parameters", bias_count)
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "xw_plus_b") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
# pylint: disable=invalid-name
def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
"""Computes dropout.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A tensor.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `keep_prob` is not in `(0, 1]`.
"""
with ops.op_scope([x], name, "dropout") as name:
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(
keep_prob, dtype=x.dtype, name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
noise_shape = noise_shape or array_ops.shape(x)
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = x * math_ops.inv(keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
return ret
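# Worked example (sketch): with keep_prob = 0.8, kept elements are scaled by
# 1 / 0.8 = 1.25 and dropped elements become 0, so the expected sum matches
# the input. The uniform draw in [keep_prob, 1 + keep_prob) floors to 1 with
# probability keep_prob and to 0 otherwise, which is exactly the keep/drop
# mask used above.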
def top_k(input, k=1, sorted=True, name=None):
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank-1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)
# pylint: enable=invalid-name
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to compute receptive field of a fully-convolutional network.
Please refer to the following g3doc for detailed explanation on how this
computation is performed, and why it is important:
g3doc/photos/vision/features/delf/g3doc/rf_computation.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.receptive_field.python.util import graph_compute_order
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.platform import tf_logging as logging
def _get_rf_size_node_input(stride, kernel_size, rf_size_output):
"""Computes RF size at the input of a given layer.
Args:
stride: Stride of given layer (integer).
kernel_size: Kernel size of given layer (integer).
rf_size_output: RF size at output of given layer (integer).
Returns:
rf_size_input: RF size at input of given layer (integer).
"""
return stride * rf_size_output + kernel_size - stride
def _get_effective_stride_node_input(stride, effective_stride_output):
"""Computes effective stride at the input of a given layer.
Args:
stride: Stride of given layer (integer).
effective_stride_output: Effective stride at output of given layer
(integer).
Returns:
effective_stride_input: Effective stride at input of given layer
(integer).
"""
return stride * effective_stride_output
def _get_effective_padding_node_input(stride, padding,
effective_padding_output):
"""Computes effective padding at the input of a given layer.
Args:
stride: Stride of given layer (integer).
padding: Padding of given layer (integer).
effective_padding_output: Effective padding at output of given layer
(integer).
Returns:
effective_padding_input: Effective padding at input of given layer
(integer).
"""
return stride * effective_padding_output + padding
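# Worked example (sketch): stacking two 3x3 convolutions with stride 1 and
# padding 1, starting from the output with rf_size = 1, stride = 1, pad = 0:
#   after the later layer:   rf = 1*1 + 3 - 1 = 3,  stride = 1,  pad = 1
#   after the earlier layer: rf = 1*3 + 3 - 1 = 5,  stride = 1,  pad = 2
# so the two-layer stack sees a 5x5 input window per output feature.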
class ReceptiveField(object):
"""Receptive field of a convolutional neural network.
Args:
size: Receptive field size.
stride: Effective stride.
padding: Effective padding.
"""
def __init__(self, size, stride, padding):
self.size = np.asarray(size)
self.stride = np.asarray(stride)
self.padding = np.asarray(padding)
def compute_input_center_coordinates(self, y, axis=None):
"""Computes the center of the receptive field that generated a feature.
Args:
y: An array of feature coordinates with shape `(..., d)`, where `d` is the
number of dimensions of the coordinates.
axis: The dimensions for which to compute the input center coordinates.
If `None` (the default), compute the input center coordinates for all
dimensions.
Returns:
x: Center of the receptive field that generated the features, at the input
of the network.
Raises:
ValueError: If the number of dimensions of the feature coordinates does
not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
y = np.asarray(y)
if y.shape[-1] != len(axis):
raise ValueError("Dimensionality of the feature coordinates `y` (%d) "
"does not match dimensionality of `axis` (%d)" %
(y.shape[-1], len(axis)))
return -self.padding[axis] + y * self.stride[axis] + (
self.size[axis] - 1) / 2
def compute_feature_coordinates(self, x, axis=None):
"""Computes the position of a feature given the center of a receptive field.
Args:
x: An array of input center coordinates with shape `(..., d)`, where `d`
is the number of dimensions of the coordinates.
axis: The dimensions for which to compute the feature coordinates.
If `None` (the default), compute the feature coordinates for all
dimensions.
Returns:
y: Coordinates of the features.
Raises:
ValueError: If the number of dimensions of the input center coordinates
does not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
x = np.asarray(x)
if x.shape[-1] != len(axis):
raise ValueError("Dimensionality of the input center coordinates `x` "
"(%d) does not match dimensionality of `axis` (%d)" %
(x.shape[-1], len(axis)))
return (x + self.padding[axis] +
(1 - self.size[axis]) / 2) / self.stride[axis]
def __iter__(self):
return iter(np.concatenate([self.size, self.stride, self.padding]))
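# Illustrative sketch of the coordinate mapping implemented above, for a single
# dimension with size=4, stride=2, padding=1 (numbers chosen for the example):
#   rf = ReceptiveField(size=[4], stride=[2], padding=[1])
#   rf.compute_input_center_coordinates([[3]])  # -1 + 3*2 + (4 - 1)/2 -> [[6.5]]
#   rf.compute_feature_coordinates([[6.5]])     # inverts the mapping   -> [[3.]]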
def compute_receptive_field_from_graph_def(graph_def,
input_node,
output_node,
stop_propagation=None,
input_resolution=None):
"""Computes receptive field (RF) parameters from a Graph or GraphDef object.
The algorithm stops the calculation of the receptive field whenever it
encounters an operation in the list `stop_propagation`. Stopping the
calculation early can be useful to calculate the receptive field of a
subgraph such as a single branch of the
[inception network](https://arxiv.org/abs/1512.00567).
Args:
graph_def: Graph or GraphDef object.
input_node: Name of the input node or Tensor object from graph.
output_node: Name of the output node or Tensor object from graph.
stop_propagation: List of operations or scope names for which to stop the
propagation of the receptive field.
input_resolution: 2D list. If the input resolution to the model is fixed and
known, this may be set. This is helpful for cases where the RF parameters
vary depending on the input resolution (this happens since SAME padding in
tensorflow depends on input resolution in general). If this is None, it is
assumed that the input resolution is unknown, so some RF parameters may be
unknown (depending on the model architecture).
Returns:
rf_size_x: Receptive field size of network in the horizontal direction, with
respect to specified input and output.
rf_size_y: Receptive field size of network in the vertical direction, with
respect to specified input and output.
effective_stride_x: Effective stride of network in the horizontal direction,
with respect to specified input and output.
effective_stride_y: Effective stride of network in the vertical direction,
with respect to specified input and output.
effective_padding_x: Effective padding of network in the horizontal
direction, with respect to specified input and output.
effective_padding_y: Effective padding of network in the vertical
direction, with respect to specified input and output.
Raises:
ValueError: If network is not aligned or if either input or output nodes
cannot be found. For network criterion alignment, see
photos/vision/features/delf/g3doc/rf_computation.md
"""
# Convert a graph to graph_def if necessary.
if isinstance(graph_def, framework_ops.Graph):
graph_def = graph_def.as_graph_def()
# Convert tensors to names.
if isinstance(input_node, framework_ops.Tensor):
input_node = input_node.op.name
if isinstance(output_node, framework_ops.Tensor):
output_node = output_node.op.name
stop_propagation = stop_propagation or []
# Computes order of computation for a given graph.
node_info, name_to_node = graph_compute_order.get_compute_order(
graph_def=graph_def,
input_node_name=input_node,
input_node_size=input_resolution)
# Sort in reverse topological order.
ordered_node_info = sorted(node_info.items(), key=lambda x: -x[1].order)
# Dictionaries to keep track of receptive field, effective stride and
# effective padding of different nodes.
rf_sizes_x = {}
rf_sizes_y = {}
effective_strides_x = {}
effective_strides_y = {}
effective_paddings_x = {}
effective_paddings_y = {}
# Initialize dicts for output_node.
rf_sizes_x[output_node] = 1
rf_sizes_y[output_node] = 1
effective_strides_x[output_node] = 1
effective_strides_y[output_node] = 1
effective_paddings_x[output_node] = 0
effective_paddings_y[output_node] = 0
# Flag to denote if we found output node yet. If we have not, we skip nodes
# until the output node is found.
found_output_node = False
# Flag to denote if padding is undefined. This happens when SAME padding mode
# is used in conjunction with stride and kernel sizes which make it such that
# the padding to be applied would depend on the input size. In this case,
# alignment checks are skipped, and the effective padding is None.
undefined_padding = False
for _, (o, node, _, _) in ordered_node_info:
if node:
logging.vlog(3, "%10d %-100s %-20s" % (o, node.name[:90], node.op))
else:
continue
# When we find input node, we can stop.
if node.name == input_node:
break
# Loop until we find the output node. All nodes before finding the output
# one are irrelevant, so they can be skipped.
if not found_output_node:
if node.name == output_node:
found_output_node = True
if found_output_node:
if node.name not in rf_sizes_x:
assert node.name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % node.name)
# In this case, node is not relevant since it's not part of the
# computation we're interested in.
logging.vlog(3, "Irrelevant node %s, skipping it...", node.name)
continue
# Get params for this layer.
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y, _, _) = parse_layer_parameters.get_layer_params(
node, name_to_node, node_info[node.name].input_size)
logging.vlog(3, "kernel_size_x = %s, kernel_size_y = %s, "
"stride_x = %s, stride_y = %s, "
"padding_x = %s, padding_y = %s, input size = %s" %
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y, node_info[node.name].input_size))
if padding_x is None or padding_y is None:
undefined_padding = True
# Get parameters at input of this layer which may or may not be propagated
# to the input layers.
rf_size_input_x = _get_rf_size_node_input(stride_x, kernel_size_x,
rf_sizes_x[node.name])
rf_size_input_y = _get_rf_size_node_input(stride_y, kernel_size_y,
rf_sizes_y[node.name])
effective_stride_input_x = _get_effective_stride_node_input(
stride_x, effective_strides_x[node.name])
effective_stride_input_y = _get_effective_stride_node_input(
stride_y, effective_strides_y[node.name])
if not undefined_padding:
effective_padding_input_x = _get_effective_padding_node_input(
stride_x, padding_x, effective_paddings_x[node.name])
effective_padding_input_y = _get_effective_padding_node_input(
stride_y, padding_y, effective_paddings_y[node.name])
else:
effective_padding_input_x = None
effective_padding_input_y = None
logging.vlog(
4, "rf_size_input_x = %s, rf_size_input_y = %s, "
"effective_stride_input_x = %s, effective_stride_input_y = %s, "
"effective_padding_input_x = %s, effective_padding_input_y = %s" %
(rf_size_input_x, rf_size_input_y, effective_stride_input_x,
effective_stride_input_y, effective_padding_input_x,
effective_padding_input_y))
# Loop over this node's inputs and potentially propagate information down.
for inp_name in node.input:
# Stop the propagation of the receptive field.
if any(inp_name.startswith(stop) for stop in stop_propagation):
logging.vlog(3, "Skipping explicitly ignored node %s.", inp_name)
continue
logging.vlog(4, "inp_name = %s", inp_name)
if inp_name.startswith("^"):
# The character "^" denotes a control dependency, so this input node
# can be safely ignored.
continue
inp_node = name_to_node[inp_name]
logging.vlog(4, "inp_node = \n%s", inp_node)
if inp_name in rf_sizes_x:
assert inp_name in rf_sizes_y, ("Node %s is in rf_sizes_x, but "
"not in rf_sizes_y" % inp_name)
logging.vlog(
4, "rf_sizes_x[inp_name] = %s,"
" rf_sizes_y[inp_name] = %s, "
"effective_strides_x[inp_name] = %s,"
" effective_strides_y[inp_name] = %s, "
"effective_paddings_x[inp_name] = %s,"
" effective_paddings_y[inp_name] = %s" %
(rf_sizes_x[inp_name], rf_sizes_y[inp_name],
effective_strides_x[inp_name], effective_strides_y[inp_name],
effective_paddings_x[inp_name], effective_paddings_y[inp_name]))
# This node was already discovered through a previous path, so we need
# to make sure that graph is aligned. This alignment check is skipped
# if the padding is not defined, since in this case alignment cannot
# be checked.
if not undefined_padding:
if effective_strides_x[inp_name] != effective_stride_input_x:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in horizontal direction")
if effective_strides_y[inp_name] != effective_stride_input_y:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in vertical direction")
if (rf_sizes_x[inp_name] - 1
) / 2 - effective_paddings_x[inp_name] != (
rf_size_input_x - 1) / 2 - effective_padding_input_x:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in horizontal direction")
if (rf_sizes_y[inp_name] - 1
) / 2 - effective_paddings_y[inp_name] != (
rf_size_input_y - 1) / 2 - effective_padding_input_y:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in vertical direction")
# Keep track of path with largest RF, for both directions.
if rf_sizes_x[inp_name] < rf_size_input_x:
rf_sizes_x[inp_name] = rf_size_input_x
effective_strides_x[inp_name] = effective_stride_input_x
effective_paddings_x[inp_name] = effective_padding_input_x
if rf_sizes_y[inp_name] < rf_size_input_y:
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_y[inp_name] = effective_padding_input_y
else:
assert inp_name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % inp_name)
# In this case, it is the first time we encounter this node. So we
# propagate the RF parameters.
rf_sizes_x[inp_name] = rf_size_input_x
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_x[inp_name] = effective_stride_input_x
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_x[inp_name] = effective_padding_input_x
effective_paddings_y[inp_name] = effective_padding_input_y
if not found_output_node:
raise ValueError("Output node was not found")
if input_node not in rf_sizes_x:
raise ValueError("Input node was not found")
return ReceptiveField(
(rf_sizes_x[input_node], rf_sizes_y[input_node]),
(effective_strides_x[input_node], effective_strides_y[input_node]),
(effective_paddings_x[input_node], effective_paddings_y[input_node]))
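# Hedged usage sketch (TF 1.x; the layer helpers and node names below are
# illustrative and depend on how the graph was actually built):
#   g = tf.Graph()
#   with g.as_default():
#     images = tf.placeholder(tf.float32, (1, None, None, 3), name='input_image')
#     net = slim.conv2d(images, 32, [3, 3], scope='conv1')
#     net = slim.max_pool2d(net, [2, 2], scope='pool1')
#   rf = compute_receptive_field_from_graph_def(
#       g.as_graph_def(), 'input_image', 'pool1/MaxPool')
#   rf_size_x, rf_size_y, stride_x, stride_y, pad_x, pad_y = rf
# Passing input_resolution=[height, width] pins down SAME padding in cases where
# the RF parameters would otherwise depend on the input size.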
|
|
from django.conf import settings
from common.tests import ViewTestCase
from common import api
from common import clean
from common import exception
from common import util
class LoginTest(ViewTestCase):
def test_login_when_signed_out(self):
r = self.login_and_get(None, '/login')
self.assertContains(r, "Forgot your password?")
self.assertContains(r, "Sign Up Now")
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_when_signed_in(self):
r = self.login_and_get('popular', '/login')
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_redirect_to(self):
r = self.login_and_get('popular', '/login', {'redirect_to': '/channel'})
r = self.assertRedirectsPrefix(r, '/channel')
self.assertTemplateUsed(r, 'channel/templates/index.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login(self):
log = 'popular'
pwd = self.passwords[clean.nick(log)]
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_with_confirmed_email(self):
log = 'hotness'
pwd = self.passwords[clean.nick(log)]
confirmed_email = 'hotness@foobar.com'
r = self.client.post('/login', {'log': confirmed_email, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/hotness/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_bad_password(self):
log = 'popular'
pwd = 'BAD PASSWORD'
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username or password')
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_bad_user(self):
log = 'BAD USER'
pwd = 'BAD PASSWORD'
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username or password')
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_user_cleanup(self):
log = 'broken'
pwd = self.passwords[clean.nick(log)]
actor_ref_pre = api.actor_get(api.ROOT, log)
self.assert_(not actor_ref_pre.normalized_nick)
self.assertRaises(exception.ApiException,
api.stream_get_presence,
api.ROOT,
log)
self.assertRaises(exception.ApiException,
api.stream_get_comment,
api.ROOT,
log)
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/broken/overview')
actor_ref_post = api.actor_get(api.ROOT, log)
self.assert_(actor_ref_post.normalized_nick)
self.assert_(api.stream_get_presence(api.ROOT, log))
self.assert_(api.stream_get_comment(api.ROOT, log))
def test_login_deleted(self):
log = 'popular'
pwd = self.passwords[clean.nick(log)]
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
api.actor_remove(api.ROOT, 'popular')
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username')
self.assertTemplateUsed(r, 'login/templates/login.html')
# Test cases and expected outcomes:
# 'annoying', 'girlfriend' do not have an email associated
# 'hermit' has an unconfirmed email
class LoginForgotTest(ViewTestCase):
##### Forgot password tests:
def test_login_forgot_already_logged_in(self):
r = self.login_and_get('popular', '/login/forgot')
# User gets sent back to the home page. Unfortunately, since this is
# 'prefix', it will match a redirect anywhere. :(
r = self.assertRedirectsPrefix(r, '/', target_status_code=302)
# For this reason, test the second redirect:
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
def test_login_forgot(self):
r = self.client.get('/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
def test_login_forgot_nick_popular(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters 'popular', 'popular' has a confirmed email.
# - Send notification to that email.
def test_nick_confirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters 'hermit', 'hermit' has an unconfirmed email
# - Send notification to that email.
def test_nick_unconfirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'hermit',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# TODO(termie): stub
# User enters 'popular', 'popular' has an unconfirmed email (shared with other
# users)
# - Send notification to that email.
def test_nick_multiple_unconfirmed(self):
pass
# User enters 'annoying', 'annoying' does not have an email
# - Tough shit.
def test_nick_no_email(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'annoying',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'does not have an email')
# User enters a user that doesn't exist
# - Tough shit.
def test_unknown_nick(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'idontexist',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'not found')
# User enters 'foo@bar.com', a confirmed email for 'popular'
# - Send notification to that email.
def test_email_confirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular@example.com',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters 'foo@bar.com', an unconfirmed email for 'hermit'
# - Send notification to that email
def test_email_unconfirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'hermit@example.com',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# TODO(termie): stub
# User enters 'foo@bar.com', an unconfirmed email for 'popular', 'unpopular'
# - Tough shit.
def test_email_multiple_unconfirmed(self):
pass
# User enters 'foo@bar.com', which doesn't map to anything
# - Tough shit.
def test_email_notfound(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'foo@bar.com',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'does not match any accounts')
class LogoutTest(ViewTestCase):
# Once user is logged out, we should not display the "Signed in as XXX" msg.
# See issue 336 for details
def test_logout_does_not_remain_signed_in(self):
r = self.login_and_get('popular', '/login')
self.assertRedirectsPrefix(r, '/user/popular/overview')
r = self.client.get('/logout')
self.assertTemplateUsed(r, 'login/templates/logout.html')
self.assertNotContains(r, "Signed in as")
|
|
#1
#v 0.001
'''
description:
    UMC's GUI, instead of using the GL feedback buffer like a typical GUI,
    uses its own interface based on hitdefs.
    it works by filling 2 buffers with data:
    - Widgets:
        holds the widget data (info), event states, and hitdef
    - layer:
        holds a complex arrangement of polygon layers
    the Widgets buffer is accessed only by the input functions and the widget functions:
    - the input functions trigger event states based on whether the mouse is over an active hitdef
    - the widget functions modify the state info of a particular widget (looked up by widget name);
      they also update the polygon coordinates, and draw or remove polygons in the layer buffer
    the layer buffer is filled by the widget functions only once (it is never cleared); it is accessed after widget management.
    the buffer has 3 internal layers:
    - the stack layer is an arrangement of polygons drawn from *max_size* (FG) to 0 (BG).
      alphas are not mixed between the FGs and BGs
    - the overlay layer is drawn over the stack layer;
      it has the same structure as the stack layer, but all alphas are mixed
    - the font layer is always the same length as the overlay layer;
      its internal layers get sandwiched between the overlay layers
    these buffers are used to build a smart GUI that manages itself,
    delivering the best performance by changing only what's needed, only when needed.
    (this cuts out 98% of the math needed to position everything)
    the reason I built this instead of using GL's feedback buffer is that,
    to use the feedback buffer, you'd have to calculate the positions once when drawing the data,
    and then recalculate them in feedback mode to verify a hit (eg: the hitdefs).
    with this, the positions only need to be calculated once.
    once created, the data isn't meant to be removed; however, it can be disabled until needed again.
    (disabling the data will cause the display parser to simply skip over it)
issues:
    while this method has worked, it's not perfected,
    and due to the large number of widgets created for UMC's GUI, it does cause a bit of lag. (30 FPS at least)
    one of the problems is that the display parser draws the data in immediate mode.
    (the vertex data and primitives are drawn individually and parsed as is)
    a better solution would be either:
    - use a single display list and recompile the data only when modified
    - use a display list for each widget (and sub-widgets) and parse/draw only the needed display lists
    I would personally go for the first option, as it would allow better performance when viewing the scene.
'''
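#Rough per-frame flow implied by the description above (presumably driven from
#VIEWER's event and display loop):
# 1. input events are forwarded to __Click/__Release/__Motion/__KeyPress/__KeyRelease,
#    which set event flags on any Widget whose enabled hitdef contains the cursor.
# 2. the panel functions (__ModelPanel, __AnimPanel, ...) call the widget functions
#    (__Button, __TButton, __SelectBox, __Scrollbar), which read those flags, update
#    W.info, and add/move/remove primitives in the layer buffer.
# 3. __DrawGUI parses the layer buffer in immediate mode, and __FrameCheck then
#    advances the event states (click -> hold, release -> cleared) for the next frame.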
import COMMON #file vars and functions for import/export processing
import VIEWER #mainly for the toggles
from VIEWER import __GL,__GLU,__pyg
from array import array as __arr
class __Widget(object):
class _event(object): #widget events
def __init__(self):
self.gainFocus=False #True for the first frame the cursor enters the hitspace
self.loseFocus=False #True for the first frame the cursor leaves the hitspace
self.hasFocus=False #True if the cursor is in the hitspace
self.clickL=False #True if the L mouse button was clicked
self.clickM=False #True if the M mouse button was clicked
self.clickR=False #True if the R mouse button was clicked
self.holdL=False #True if the L mouse button is held
self.holdM=False #True if the M mouse button is held
self.holdR=False #True if the R mouse button is held
self.releaseL=False #True if the L mouse button was released
self.releaseM=False #True if the M mouse button was released
self.releaseR=False #True if the R mouse button was released
self.scrollU=False #True if scrolling up
self.scrollD=False #True if scrolling down
self.keyPress=False #True if a key was pressed
self.keyHold=False #True if a key is being held
self.keyRelease=False#True if a key was released
class _hitdef: #Widget mouse-hit area
#the event handlers manage the widgets by their hit-defs.
#if the mouse does something within the widget's hit-def area, the widget is updated
def __init__(self):
self.enabled=True #allow hitdef testing
self.x=0.0; self.y=0.0
self.X=0.0; self.Y=0.0
def __init__(self):
self.info=None #state info filled and used by the widget's creation function
self.event=self._event() #Widget.event.*wevent*
self.hitdef=self._hitdef()
self.motion=False #allow repositioning
self.allowKeys=False #triggered by a widget (allows handling by the GUI sub-system)
self.key=None #the keys in which was pressed/released
class __Layer(object):
class _PrimitiveCollector(object):
class _String(object):
def __init__(self,string,r,g,b,a):
global char
self.text = string
self.lists=[]
self.x,self.y=0,0
self.s = 1.0
self.r,self.g,self.b,self.a=r,g,b,a
self.enabled = True
self.w,self.h=0,0
for c in string:
l,w,h = char[c]
self.lists.append(char[c])
if h>self.h: self.h = h
self.w += w+1 #1px spacing
def Update(self, string):
self.text = string
self.lists=[]
self.w,self.h=0,0
for c in string:
l,w,h = char[c]
self.lists.append(char[c])
if h>self.h: self.h = h
self.w += w+1 #1px spacing
rect=(-1,-1,-1,-1)
def Position(self,x,y,X,Y,s=None):
if (x,y,X,Y)!=self.rect:
self.rect=(x,y,X,Y)
if s!=None: self.s = s
w,h = self.w*self.s,self.h*self.s
#center to the area: (if specified)
if X!=None: x+=((X-x)/2)-w/2
if Y!=None: y+=((Y-y)/2)-h/2
self.x,self.y=x,y#+h
return True
else: return False
def Color(self,r,g,b,a): #ints required
if (self.r,self.g,self.b,self.a)!=(r,g,b,a): self.r,self.g,self.b,self.a=r,g,b,a; return True
else: return False
class _Primitive(object):
def __init__(self,isTri,r,g,b,a,v1,v2,v3,v4=None):
self.r,self.g,self.b,self.a=r,g,b,a
self.v1,self.v2,self.v3,self.v4=v1,v2,v3,v4
self.isTri=isTri
def Position(self,v1,v2,v3,v4=None):
if (v1,v2,v3,v4)!=(self.v1,self.v2,self.v3,self.v4): self.v1,self.v2,self.v3,self.v4=v1,v2,v3,v4; return True
else: return False
def Color(self,r,g,b,a):
fcm=1./255; r,g,b,a=r*fcm,g*fcm,b*fcm,a*fcm
if (self.r,self.g,self.b,self.a)!=(r,g,b,a): self.r,self.g,self.b,self.a=r,g,b,a; return True
else: return False
def __init__(self):
self.primitives={}
self.strings={}
def HasPrimitive(self,Name):
return Name in self.primitives
def AddTri(self,Name,v1,v2,v3,color=(0,0,0,0)):
fcm=1./255; r,g,b,a=color
self.primitives[Name]=self._Primitive(True,r*fcm,g*fcm,b*fcm,a*fcm,v1,v2,v3)
def AddQuad(self,Name,v1,v2,v3,v4,color=(0,0,0,0)):
fcm=1./255; r,g,b,a=color
self.primitives[Name]=self._Primitive(False,r*fcm,g*fcm,b*fcm,a*fcm,v1,v2,v3,v4)
def RemovePrimitive(self,Name): self.primitives.pop(Name)
# Text services:
def HasString(self,Name):
if Name in self.strings:
return self.strings[Name].enabled
else: return False
def AddString(self,Name,text,scale,x,y,X=None,Y=None,color=(0,0,0,255)): #X,Y is the area coords to center this text on
if Name not in self.strings: #add the string only if needed
fcm=1./255; r,g,b,a = color
self.strings[Name]=self._String(text,r*fcm,g*fcm,b*fcm,a*fcm)
self.strings[Name].Position(x,y,X,Y,scale)
else:
self.strings[Name].enabled=True
def RemoveString(self,Name):
self.strings[Name].enabled=False
def __init__(self):
self.stack={} #draws FG before BG with FG over BG (alphas are not mixed)
self.overlay={} #draws over the stack (alphas are mixed)
#self.font={} #same as Overly
#NOTE: overlay[1] draws over font[0]
def AddStack(self):
self.stack[len(self.stack)]=self._PrimitiveCollector()
def AddOverlay(self):
self.overlay[len(self.overlay)]=self._PrimitiveCollector()
#self.font[len(self.font)]=self._FontCollector()
def clear(self):
self.stack.clear()
self.overlay.clear()
#self.font={}
self.AddStack()
self.AddOverlay()
layer={} #GUI layering info (collection buffers)
layer[0]=__Layer() #updated once, modified, and reused (this is the main layer)
layer[0].AddStack()
layer[0].AddOverlay()
#Drawing Order:
#layer[0]
# stack[0] #BG
# primitive[0]
# primitive[1]
# primitive[2]
# stack[1] #FG (not influenced by BG)
# primitive[0] #these primitives are drawn first with a depth of ((len(stack)-stack_index)*.01)
# primitive[1]
#
# overlay[0] #a layer drawn over the stack-layer
# primitive[0]
# primitive[1]
# text[0]
# text[1]
# text[2]
#
# overlay[1] #overlays previous font, overlay, and stack
# primitive[0]
# primitive[1]
# text[0]
#
#layer[1] #(used for browsers, errors, and other popups)
# stack[0] #overlays layer[0]
# primitive[0]
# stack[1]
# primitive[0]
#
# overlay[0]
# primitive[0]
# text[0]
#
# overlay[1]
# primitive[0]
# text[0]
#this contains the info for each widget
Widgets = {}
motionx,motiony=None,None
movementx,movementy=None,None
AllowHitUpdates=True; noRelease=False
global __AllowVIEWERControl;__AllowVIEWERControl=True
#-----------------------------------
#main Widgets
def __RemoveSelectBox(Na,priority=0):
global Widgets,layer
try:
Widgets[Na].hitdef.enabled=False #disable the hitdef (save the state)
sbna='SelectBox%sQuad'%Na
sbfna='SelectBox%sText'%Na
sbbna='SelectBox%sButtonQuad'%Na
sbbdna='SelectBox%sButtonDecal'%Na
p2 = priority+2
p3 = priority+3
if layer[0].stack[p2].HasPrimitive(sbna):
layer[0].stack[p2].RemovePrimitive(sbna)
layer[0].stack[p2].RemoveString(sbfna)
layer[0].stack[p2].RemovePrimitive(sbbna)
layer[0].stack[p3].RemovePrimitive(sbbdna)
except: pass
def __SelectBox(X,Y,W,Na,Items,Def=0,Text='',priority=0):
global Widgets,layer, AllowHitUpdates,noRelease
#if Text=='': Text=Items[Def]
#minor pre-calculations
X2,Y2 = X+W,Y+20.
sy = Y2-Y
hsy=sy*.5
hsx2 = ((X2+15.)-X2)*.5
#verify the widget exists
try: W=Widgets[Na]
except KeyError:
Widgets[Na]=__Widget()
W=Widgets[Na]
W.info=[Def,Items[Def],False] #[selectionID,isOpen]
W.hitdef.x=X; W.hitdef.y=Y; W.hitdef.X=X2+15.; W.hitdef.Y=Y2
#update the HitDef if changed by an outside function
if AllowHitUpdates!=W.hitdef.enabled: W.hitdef.enabled=AllowHitUpdates
p2 = priority+2
p3 = priority+3
#drawing data:
sbna='SelectBox%sQuad'%Na
sbfna='SelectBox%sText'%Na
sbbna='SelectBox%sButtonQuad'%Na
sbbdna='SelectBox%sButtonDecal'%Na
if not layer[0].stack[p2].HasPrimitive(sbna): #don't draw if we already have
layer[0].stack[p2].AddQuad(sbna,[X,Y],[X2,Y],[X2,Y2],[X,Y2],(175,175,175,180))
layer[0].stack[p2].AddString(sbfna,W.info[1],0.667,X+5.,Y+2.,None,None,(0,0,0,220))
layer[0].stack[p2].AddQuad(sbbna,[X2,Y],[X2+15.,Y],[X2+15.,Y2],[X2,Y2],(95,95,95,180))
layer[0].stack[p3].AddTri(sbbdna,[X2+5.,(Y+hsy)-2.],[(X2+15.)-5.,(Y+hsy)-2.],[X2+hsx2,(Y+hsy)+2.], (63,63,63,180))
SB = layer[0].stack[p2].primitives[sbna]
SBF = layer[0].stack[p2].strings[sbfna]
SBB = layer[0].stack[p2].primitives[sbbna]
SBBD = layer[0].stack[p3].primitives[sbbdna]
#Positioning Verification
if SB.Position([X,Y],[X2,Y],[X2,Y2],[X,Y2]):
SBF.Position(X+5.,Y+2.,None,None)
SBB.Position([X2,Y],[X2+15.,Y],[X2+15.,Y2],[X2,Y2])
SBBD.Position([X2+5.,(Y+hsy)-2.],[(X2+15.)-5.,(Y+hsy)-2.],[X2+hsx2,(Y+hsy)+2.])
if Text=='':
if SBF.text != W.info[1]:
#print SBF.text,'- [', Items[W.info[0]],',', W.info[1],', False ]'
SBF.Update(W.info[1])
else:
if SBF.text != Text:
SBF.Update(Text)
#HitDef
if W.hitdef.x!=X: W.hitdef.x=X; W.hitdef.X=X2+15.
if W.hitdef.y!=Y: W.hitdef.y=Y; W.hitdef.Y=Y2
#Widget logic
if W.event.hasFocus: #change the color if the mouse is over the selection box
if W.event.clickL or W.event.holdL: #change the color if the selection box is clicked or held
SBB.Color(79,79,79,180)
else: SBB.Color(87,87,87,180)
else: SBB.Color(95,95,95,180)
if W.event.releaseL: W.info[2]=True #isOpen = True
State = W.info
if State[2]: #the box has been clicked
AllowHitUpdates=False #prevent hit updates from other widgets
#(once we've made our selection, we can then allow hit updates)
remove=False
for i,v in enumerate(Items):
#generate a custom widget name using the main name, the item's text, and the enumerant value
N = '%s_%s_Sel%i'%(Na,v,i)
#minor pre-calculations
yp=(sy*(i+1))
x1,y1,x2,y2=X,Y+yp,X2,Y2+yp
#we have to create a new widget for each entry here
#verify the widget exists
try: sW=Widgets[N]
except KeyError:
Widgets[N]=__Widget()
sW=Widgets[N]
sW.info=[Def,False] #[selectionID,isOpen]
sW.hitdef.x=x1; sW.hitdef.y=y1; sW.hitdef.X=x2; sW.hitdef.Y=y2
#HitDefs created at this point are always active
#drawing data:
sbsbna='SelectionButton%s'%N
sbsfna='SelectionFont%s'%N
if not layer[0].overlay[0].HasPrimitive(sbsbna):
layer[0].overlay[0].AddQuad(sbsbna,[x1,y1],[x2,y1],[x2,y2],[x1,y2],(0,0,0,127))
layer[0].overlay[0].AddString(sbsfna,v,0.667,x1+5.,y1+2.,None,None,(255,255,255,100))
sB=layer[0].overlay[0].primitives[sbsbna]
sF=layer[0].overlay[0].strings[sbsfna]
#we don't need to verify the positioning here... (nothing can be moved at this point)
#Widget logic
if sW.event.hasFocus:
if sW.event.clickL or sW.event.holdL: sB.Color(127,127,127,127); sF.Color(0,0,0,100)
else: sB.Color(191,191,191,127); sF.Color(0,0,0,100)
else: sB.Color(0,0,0,127); sF.Color(255,255,255,100)
#apply the selection and set to remove these widgets when LMB is released
if sW.event.releaseL:
SBF.Update(v)
if Text=='': W.info=[i,v,False]
else: W.info=[i,Text,False]
remove=True
#add a few widgets to define the click-off area
#(anywhere on the screen that's not in this widget's range)
# ^clicking will close the widget and keep it at it's current selection
_DAN=['%s_DeActivator%i'%(Na,i) for i in range(4)] #custom deactivator names
from VIEWER import W as _W, H as _H
#test if these widgets exist
try: R0=Widgets[_DAN[0]]; R1=Widgets[_DAN[1]]; R2=Widgets[_DAN[2]]; R3=Widgets[_DAN[3]]
except KeyError:
Widgets[_DAN[0]]=__Widget()
Widgets[_DAN[1]]=__Widget()
Widgets[_DAN[2]]=__Widget()
Widgets[_DAN[3]]=__Widget()
R0=Widgets[_DAN[0]]; R1=Widgets[_DAN[1]]; R2=Widgets[_DAN[2]]; R3=Widgets[_DAN[3]]
R0.hitdef.x=0.;R0.hitdef.y=0.;R0.hitdef.X=X ;R0.hitdef.Y=_H #left
R1.hitdef.x=X2;R1.hitdef.y=0.;R1.hitdef.X=_W;R1.hitdef.Y=_H #right
R2.hitdef.x=X ;R2.hitdef.y=0.;R2.hitdef.X=X2;R2.hitdef.Y=Y2 #top (Y2 because the main widget has no control here)
R3.hitdef.x=X ;R3.hitdef.y=y2;R3.hitdef.X=X2;R3.hitdef.Y=_H #bottom
#the logic to test for and execute a click-off (from any mouse button)
if any([R0.event.clickL,R0.event.clickM,R0.event.clickR,
R1.event.clickL,R1.event.clickM,R1.event.clickR,
R2.event.clickL,R2.event.clickM,R2.event.clickR,
R3.event.clickL,R3.event.clickM,R3.event.clickR]):
W.info[2]=False #isOpen = False
remove=True
if remove: #remove the selection widgets and click-off widgets
for i,v in enumerate(Items):
N='%s_%s_Sel%i'%(Na,v,i); sbsbna='SelectionButton%s'%N; sbsfna='SelectionFont%s'%N
layer[0].overlay[0].RemovePrimitive(sbsbna); layer[0].overlay[0].RemoveString(sbsfna)
Widgets.pop(N)
Widgets.pop(_DAN[0]) #left
Widgets.pop(_DAN[1]) #right
Widgets.pop(_DAN[2]) #top
Widgets.pop(_DAN[3]) #bottom
AllowHitUpdates=True
return W.info[0]
def __RemoveTButton(Na,priority=0):
global Widgets,layer
try:
Widgets[Na].hitdef.enabled=False #disable the hitdef (save the state)
tbna='TButton%sQuad'%Na
tbfna='TButton%sText'%Na
tbfbgna='TButton%sText'%Na
p2 = priority+2
if layer[0].stack[p2].HasPrimitive(tbna):
layer[0].stack[p2].RemovePrimitive(tbna)
layer[0].stack[p2].RemoveString(tbfna)
layer[0].stack[p2].RemovePrimitive(tbfbgna)
except: pass
def __TButton(X,Y,Na,St=False,Text='',fontcolor=(0,0,0,220),priority=0):
global Widgets,layer, AllowHitUpdates
#minor pre-calculations
X2,Y2=X+20.,Y+20.
fx,fy=X+25.,Y+2.
#verify the widget exists
try: W=Widgets[Na]
except KeyError:
Widgets[Na]=__Widget()
W=Widgets[Na]
W.info=St #toggle state
W.hitdef.x=X; W.hitdef.y=Y; W.hitdef.X=X2; W.hitdef.Y=Y2
#update the HitDef if changed by an outside function
if AllowHitUpdates!=W.hitdef.enabled: W.hitdef.enabled=AllowHitUpdates
#drawing data:
tbna='TButton%sQuad'%Na
tbfna='TButton%sText'%Na
tbfbgna='TButton%sText'%Na
p2 = priority+2
if not layer[0].stack[p2].HasPrimitive(tbna): #don't draw if we already have
layer[0].stack[p2].AddQuad(tbna,[X,Y],[X2,Y],[X2,Y2],[X,Y2],(95,95,95,180))
layer[0].stack[p2].AddString(tbfna,Text,0.667,fx,fy,None,None,fontcolor)
fxtbfw = fx+layer[0].stack[p2].strings[tbfna].w; fytbfh = fy+layer[0].stack[p2].strings[tbfna].h
layer[0].stack[p2].AddQuad(tbfbgna,[fx,fy],[fxtbfw,fy],[fxtbfw,fytbfh],[fx,fytbfh],(127,127,127,200))
TB = layer[0].stack[p2].primitives[tbna]
TBF = layer[0].stack[p2].strings[tbfna]
TBFBG = layer[0].stack[p2].primitives[tbfbgna]
#Positioning Verification
if TB.Position([X,Y],[X2,Y],[X2,Y2],[X,Y2]):
TBF.Position(fx,fy,None,None)
fxtbfw,fytbfh = fx+TBF.w,fy+TBF.h
TBFBG.Position([fx,fy],[fxtbfw,fy],[fxtbfw,fytbfh],[fx,fytbfh])
#HitDef
if W.hitdef.x!=X: W.hitdef.x=X; W.hitdef.X=X2
if W.hitdef.y!=Y: W.hitdef.y=Y; W.hitdef.Y=Y2
#Widget logic
if W.info:
if W.event.hasFocus: #change the color if the mouse is over the selection box
if W.event.clickL or W.event.holdL: #change the color if the selection box is clicked or held
TB.Color(79,79,79,180)
else: TB.Color(95,95,95,180)
else: TB.Color(79,79,79,180)
else:
if W.event.hasFocus:
if W.event.clickL or W.event.holdL:
TB.Color(79,79,79,180)
else: TB.Color(111,111,111,180)
else: TB.Color(95,95,95,180)
if W.event.releaseL: W.info=(False if W.info else True)
return W.info
def __RemoveButton(Na,priority=0):
global Widgets,layer
try:
Widgets[Na].hitdef.enabled=False #disable the hitdef (save the state)
bna='Button%sQuad'%Na
bfna='Button%sText'%Na
p2 = priority+2
if layer[0].stack[p2].HasPrimitive(bna):
layer[0].stack[p2].RemovePrimitive(bna)
layer[0].stack[p2].RemoveString(bfna)
except: pass
def __Button(X1,Y1,X2,Y2,Na,Text='',Hint='',fontcolor=(0,0,0,255),St=False,priority=0):
global Widgets,AllowHitUpdates,layer
#verify the widget exists
try: W=Widgets[Na]
except KeyError:
Widgets[Na]=__Widget()
W=Widgets[Na]
W.info=['button',St]
W.hitdef.x=X1; W.hitdef.y=Y1; W.hitdef.X=X2; W.hitdef.Y=Y2
#update the HitDef if changed by an outside function
if AllowHitUpdates!=W.hitdef.enabled: W.hitdef.enabled=AllowHitUpdates
#drawing data:
bna='Button%sQuad'%Na
bfna='Button%sText'%Na
if not layer[0].stack[priority+2].HasPrimitive(bna): #don't draw if we already have
layer[0].stack[priority+2].AddQuad(bna,[X1,Y1],[X2,Y1],[X2,Y2],[X1,Y2],(95,95,95,200))
layer[0].stack[priority+2].AddString(bfna,Text,0.667,X1,Y1,X2,Y2,fontcolor)
B = layer[0].stack[priority+2].primitives[bna]
BF = layer[0].stack[priority+2].strings[bfna]
#Positioning Verification
if B.Position([X1,Y1],[X2,Y1],[X2,Y2],[X1,Y2]):
BF.Position(X1,Y1,X2,Y2)
#HitDef
if W.hitdef.x!=X1: W.hitdef.x=X1
if W.hitdef.y!=Y1: W.hitdef.y=Y1
if W.hitdef.X!=X2: W.hitdef.X=X2
if W.hitdef.Y!=Y2: W.hitdef.Y=Y2
#Widget logic
if W.event.hasFocus: #change the color if the mouse is over the button
if W.event.clickL or W.event.holdL: #change the color if the button is clicked or held
B.Color(79,79,79,200)
else: B.Color(87,87,87,200)
else: B.Color(95,95,95,200)
if W.event.releaseL: W.info[1]=True #set the button state as True upon release
return W.info[1]
def __EndButton(Na): #sets button state as False
try:
if type(Widgets[Na].info)==list: #verify this name points to a button:
if Widgets[Na].info[0]=='button': Widgets[Na].info[1]=False
except KeyError: pass #this button may not yet be defined
def __RemoveScrollbar(Na,priority=0):
global Widgets,layer
try:
Widgets[Na].hitdef.enabled=False #disable the hitdef (save the state)
sbtna='ScrollBar%sTrack'%Na
sbbna='ScrollBar%sButton'%Na
p2 = priority+2
p3 = priority+3
if layer[0].stack[p2].HasPrimitive(sbtna):
layer[0].stack[p2].RemovePrimitive(sbtna)
layer[0].stack[p3].RemovePrimitive(sbbna)
except: pass
def __Scrollbar(X,Y,S,R,Na,y=False,priority=0):
global Widgets,layer, AllowHitUpdates,movementx,movementy
#TODO:
# - scrollbar-track hitdefs
# - R = float()
#verify the widget exists
try: W=Widgets[Na]
except KeyError:
Widgets[Na]=__Widget()
W=Widgets[Na]
W.info=0
#update the HitDef if changed by an outside function
if AllowHitUpdates!=W.hitdef.enabled: W.hitdef.enabled=AllowHitUpdates
if not W.motion: W.motion=True
#minor pre-calculations
BPX1,BPY1,BPX2,BPY2=X,Y,X+15.,Y+15.
P=W.info
if y: p=P; Y2=Y+S; X2=BPX2; BPY1+=p; BPY2+=p
else: p=P; X2=X+S; Y2=BPY2; BPX1+=p; BPX2+=p
#drawing data:
sbtna='ScrollBar%sTrack'%Na
sbbna='ScrollBar%sButton'%Na
p2 = priority+2
p3 = priority+3
if not layer[0].stack[p2].HasPrimitive(sbtna): #don't draw if we already have
layer[0].stack[p2].AddQuad(sbtna,[X,Y],[X2,Y],[X2,Y2],[X,Y2], (95,95,95,180))
layer[0].stack[p3].AddQuad(sbbna,[BPX1,BPY1],[BPX2,BPY1],[BPX2,BPY2],[BPX1,BPY2],(143,143,143,180))
SBT=layer[0].stack[p2].primitives[sbtna]
SBB=layer[0].stack[p3].primitives[sbbna]
#Positioning Verification
SBT.Position([X,Y],[X2,Y],[X2,Y2],[X,Y2])
SBB.Position([BPX1,BPY1],[BPX2,BPY1],[BPX2,BPY2],[BPX1,BPY2])
#HitDef
if W.hitdef.x!=BPX1: W.hitdef.x=BPX1; W.hitdef.X=BPX2
if W.hitdef.y!=BPY1: W.hitdef.y=BPY1; W.hitdef.Y=BPY2
#Widget logic
if W.event.hasFocus: #change the color if the mouse is over the button
if W.event.clickL or W.event.holdL: #change the color if the button is clicked or held
SBB.Color(159,159,159,180)
else: SBB.Color(175,175,175,180)
else: SBB.Color(167,167,167,180)
m,s = (movementy,S-15.) if y else (movementx,S-15.)
if W.event.holdL:
if m!=None:
W.info=m-Y if y else m-X
if W.info>s: W.info=s
elif W.info<0: W.info=0
#scale the range itself:
return -(R*((1./s)*W.info))
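#Illustrative numbers for the scaling above: with S=115. the usable travel is
#s=S-15.=100., so a button dragged to W.info=50 returns -(R*((1./100.)*50)) = -R/2,
#i.e. the caller offsets its content by half of the requested range R, negatively.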
#-----------------------------------
#panel drawing functions and sub-functions
#---Left Panel---:
def __RemoveModelManageTab(pr):
__RemoveButton("ModelImportButton",pr)
__RemoveScrollbar('ModelManageTabSBar',pr)
def __DrawModelManageTab(W,H,pr):
__RemoveModelFeaturesTab(pr)
__RemoveModelExportTab(pr)
P=__Scrollbar(172.,52.,H-104.,240.,'ModelManageTabSBar',True,pr)
if __Button(40.,52.+P,140.,72.+P,"ModelImportButton","Import","",(230,230,230,255), priority=pr):
#---
#code when button is clicked
#---
__EndButton("ModelImportButton")
def __RemoveModelFeaturesTab(pr):
pass
def __DrawModelFeaturesTab(W,H,pr):
__RemoveModelManageTab(pr)
__RemoveModelExportTab(pr)
pass
def __RemoveModelExportTab(pr):
pass
def __DrawModelExportTab(W,H,pr):
__RemoveModelManageTab(pr)
__RemoveModelFeaturesTab(pr)
#__BrowseBar(pw10,ph40,180) #Model Export Path
pass
ActiveModelTab=0
def __RemoveModelPanel(pr):
global ActiveModelTab
__RemoveButton("ModelManageTab",pr)
__RemoveButton("ModelFeaturesTab",pr)
__RemoveButton("ModelExportTab",pr)
if ActiveModelTab==0: __RemoveModelManageTab(pr)
elif ActiveModelTab==1: __RemoveModelFeaturesTab(pr)
elif ActiveModelTab==2: __RemoveModelExportTab(pr)
def __ModelPanel(W,H,pr):
global ActiveModelTab
if __Button(0.,21.,210.,41.,"ModelManageTab","Models","",(230,230,230,255),True, pr):
if ActiveModelTab!=0:
ActiveModelTab=0
__DrawModelManageTab(W,H,pr)
__RemoveModelFeaturesTab(pr)
__RemoveModelExportTab(pr)
__EndButton("ModelManageTab")
MB1Y1,MB1Y2 = (H-41. if ActiveModelTab==0 else 42.),(H-21. if ActiveModelTab==0 else 62.)
if __Button(0.,MB1Y1,210.,MB1Y2,"ModelFeaturesTab","Features","",(230,230,230,255), priority=pr):
if ActiveModelTab!=1:
ActiveModelTab=1
__RemoveModelManageTab(pr)
__DrawModelFeaturesTab(W,H,pr)
__RemoveModelExportTab(pr)
__EndButton("ModelFeaturesTab")
MB2Y1,MB2Y2 = (H-20. if ActiveModelTab!=2 else 63.),(H if ActiveModelTab!=2 else 83.)
if __Button(0.,MB2Y1,210.,MB2Y2,"ModelExportTab","Export","",(230,230,230,255), priority=pr):
if ActiveModelTab!=2:
ActiveModelTab=2
__RemoveModelManageTab(pr)
__RemoveModelFeaturesTab(pr)
__DrawModelExportTab(W,H,pr)
__EndButton("ModelExportTab")
#draw widgets based on the active button
if ActiveModelTab==0: __DrawModelManageTab(W,H,pr)
elif ActiveModelTab==1: __DrawModelFeaturesTab(W,H,pr)
elif ActiveModelTab==2: __DrawModelExportTab(W,H,pr)
#---Right Panel---:
def __RemoveAnimManageTab(pr):
__RemoveButton("AnimImportButton",pr)
def __DrawAnimManageTab(W,H,pr):
__RemoveAnimFeaturesTab(pr)
__RemoveAnimExportTab(pr)
if __Button(W-160.,62.,W-50.,82.,"AnimImportButton","Import","",(230,230,230,255),priority=pr):
#---
#code when button is clicked
#---
__EndButton("AnimImportButton")
def __RemoveAnimFeaturesTab(pr):
pass
def __DrawAnimFeaturesTab(W,H,pr):
__RemoveAnimManageTab(pr)
__RemoveAnimExportTab(pr)
pass
def __RemoveAnimExportTab(pr):
pass
def __DrawAnimExportTab(W,H,pr):
__RemoveAnimManageTab(pr)
__RemoveAnimFeaturesTab(pr)
#__BrowseBar(pw10,ph40,180) #Anim Export Path
pass
ActiveAnimTab=0
def __RemoveAnimPanel(pr):
global ActiveAnimTab
__RemoveButton("AnimManageTab",pr)
__RemoveButton("AnimFeaturesTab",pr)
__RemoveButton("AnimExportTab",pr)
if ActiveAnimTab==0: __RemoveAnimManageTab(pr)
elif ActiveAnimTab==1: __RemoveAnimFeaturesTab(pr)
elif ActiveAnimTab==2: __RemoveAnimExportTab(pr)
def __AnimPanel(W,H,pr):
global ActiveAnimTab
#__ExPanel(pw*0,ph*21,pw*210,ph*h,1,'MODEL')
if __Button(W-210.,21.,W,41.,"AnimManageTab","Animations","",(230,230,230,255),True,pr):
if ActiveAnimTab!=0:
ActiveAnimTab=0
__DrawAnimManageTab(W,H,pr)
__RemoveAnimFeaturesTab(pr)
__RemoveAnimExportTab(pr)
__EndButton("AnimManageTab")
AB1Y1,AB1Y2 = (H-41. if ActiveAnimTab==0 else 42.),(H-21. if ActiveAnimTab==0 else 62.)
if __Button(W-210.,AB1Y1,W,AB1Y2,"AnimFeaturesTab","Features","",(230,230,230,255), priority=pr):
if ActiveAnimTab!=1:
ActiveAnimTab=1
__RemoveAnimManageTab(pr)
__DrawAnimFeaturesTab(W,H,pr)
__RemoveAnimExportTab(pr)
__EndButton("AnimFeaturesTab")
AB2Y1,AB2Y2 = (H-20. if ActiveAnimTab!=2 else 63.),(H if ActiveAnimTab!=2 else 83.)
if __Button(W-210.,AB2Y1,W,AB2Y2,"AnimExportTab","Export","",(230,230,230,255), priority=pr):
if ActiveAnimTab!=2:
ActiveAnimTab=2
__RemoveAnimManageTab(pr)
__RemoveAnimFeaturesTab(pr)
__DrawAnimExportTab(W,H,pr)
__EndButton("AnimExportTab")
#draw widgets based on the active button
if ActiveAnimTab==0: __DrawAnimManageTab(W,H,pr)
elif ActiveAnimTab==1: __DrawAnimFeaturesTab(W,H,pr)
elif ActiveAnimTab==2: __DrawAnimExportTab(W,H,pr)
#---Top Panel---:
def __RemoveDisplayPanel(pr):
__RemoveTButton('EnLight',pr)
__RemoveTButton('EnWire',pr)
__RemoveSelectBox('Draw Bones',pr)
__RemoveSelectBox('Display',pr)
__RemoveSelectBox('Projection',pr)
__RemoveSelectBox('3D Drawing',pr)
__RemoveSelectBox('Colors',pr)
__RemoveSelectBox('Freq',pr)
__RemoveTButton('EnNrm',pr)
__RemoveScrollbar('DisplayPanelSBar',pr)
def __DisplayPanel(W,H,X1,X2,pr):
#minor pre-calculations
X1261 = X1+261.
X1141 = X1+141.
X111 = X1+11.
V=__Scrollbar(X111,111,(X2-X111)-11.,0.,'DisplayPanelSBar', priority=pr)
VIEWER.TOGGLE_LIGHTING=__TButton(X111,31.,'EnLight',True,'Lighting', priority=pr)
VIEWER.TOGGLE_WIREFRAME=__TButton(X111,56.,'EnWire',False,'Wireframe', priority=pr)
VIEWER.TOGGLE_BONES=__SelectBox(X111,81.,110.,'Draw Bones',['None','Standard','Overlay (X-Ray)'],0, priority=pr)
#reversed drawing order left
if VIEWER.TOGGLE_3D==2:
__RemoveSelectBox('Colors',pr)
VIEWER.TOGGLE_3D_MODE[1]=[1./60,1./120][__SelectBox(X1261,81.,50.,'Freq',['60hz','120hz'],0, priority=pr)]
if VIEWER.TOGGLE_3D==1:
__RemoveSelectBox('Freq',pr)
VIEWER.TOGGLE_3D_MODE[0]=__SelectBox(X1261,81.,50.,'Colors',['R|GB','G|RB','B|RG'],0, priority=pr)
VIEWER.TOGGLE_3D=__SelectBox(X1141,81.,100.,'3D Drawing',['Off','Analglyph','Shutter'],0, priority=pr)
try: #this passive hack updates the widget to match the key toggle.
proj = Widgets['Projection']
proj.info = [ int(VIEWER.TOGGLE_ORTHO), ['Perspective','Orthographic'][VIEWER.TOGGLE_ORTHO], proj.info[2] ]
except: pass #widget not defined yet
VIEWER.TOGGLE_ORTHO = __SelectBox(X1141,56.,100.,'Projection',['Perspective','Orthographic'],1, priority=pr)
VIEWER.TOGGLE_GRID=[2 if VIEWER.TOGGLE_GRID>2 else VIEWER.TOGGLE_GRID,3,4][
__SelectBox(X1141,31.,100.,'Display',['Grid','Floor','Off'],0, priority=pr)]
VIEWER.TOGGLE_NORMALS = __TButton(X1261,31.,'EnNrm',False,'Normals', priority=pr)
#---Bottom Panel---:
def __RemoveControlPanel(pr):
pass
def __ControlPanel(W,H,X1,X2,pr):
pass
#---Expansion Panel---: (top bar)
def __DrawOptionsData(W,H,hw):
global layer
#options go here
def __RemoveOptionsData():
global layer
def __DrawUpdateData(W,H,hw):
global layer
if not layer[0].overlay[0].HasString('UpdateDevelopmentString'):
layer[0].overlay[0].AddString(
'UpdateDevelopmentString', "The Update system is still in development.", 0.889, 0.0,0.0, W,H-20., (0,0,0,127))
def __RemoveUpdateData(): #verify if anything is used by the update panel and remove it if so.
global layer
if layer[0].overlay[0].HasString('UpdateDevelopmentString'):
layer[0].overlay[0].RemoveString('UpdateDevelopmentString')
def __OpenOptionsUpdatePanel(W,H,hw):
#update the decal coords to match the button positions
OBD=layer[0].stack[3].primitives['OptionButtonDecal']
UND=layer[0].stack[3].primitives['UpdateNotificationDecal']
#move to bottom
OBD.Position([hw-10.,H-6.],[hw+10.,H-6.],[hw,H-14.])
UND.Position([W-12.,H-12.],[W-7.,H-12.],[W-7.,H-7.],[W-12.,H-7.])
def __CloseOptionsUpdatePanel(W,H,hw):
if layer[0].stack[0].HasPrimitive('OptionsUpdatePanelBG'): #remove the BG
layer[0].stack[0].RemovePrimitive('OptionsUpdatePanelBG')
__RemoveUpdateData(); __RemoveOptionsData() #remove any active display data
#update the decal coords to match the button positions
OBD=layer[0].stack[3].primitives['OptionButtonDecal']
UND=layer[0].stack[3].primitives['UpdateNotificationDecal']
#move buttons to top
OBD.Position([hw-10.,6.],[hw+10.,6.],[hw,14.])
UND.Position([W-12.,7.],[W-7.,7.],[W-7.,12.],[W-12.,12.])
OptionsUpdatePanelExpensionState=False #long var names won't easily get used
OptionsUpdatePanelButton=0 #ID of current button pressed (used when clicking another button)
def __OptionsUpdatePanel(W,H):
global OptionsUpdatePanelExpensionState,OptionsUpdatePanelButton,layer
PES=OptionsUpdatePanelExpensionState #short local name from long global name
hw = W*.5
if not layer[0].stack[3].HasPrimitive('OptionButtonDecal'):
if PES:
layer[0].stack[3].AddTri('OptionButtonDecal',
[hw-10.,H-6.],[hw+10.,H-6.],[hw,H-14.], (63,63,63,180))
layer[0].stack[3].AddQuad('UpdateNotificationDecal',
[W-12.,H-12.],[W-7.,H-12.],[W-7.,H-7.],[W-12.,H-7.], (255,63,63,200))
else:
layer[0].stack[3].AddTri('OptionButtonDecal',
[hw-10.,6.],[hw+10.,6.],[hw,14.], (63,63,63,180))
layer[0].stack[3].AddQuad('UpdateNotificationDecal',
[W-12.,7.],[W-7.,7.],[W-7.,12.],[W-12.,12.], (255,63,63,200))
if PES: #option/update panel is expanded:
Y = H
'''
if OptionsUpdatePanelButton==0: #options panel drawing:
__RemoveUpdateData(); __DrawOptionsData()
elif OptionsUpdatePanelButton==1: #update panel drawing:
__RemoveOptionsData(); __DrawUpdateData()
'''
if not layer[0].stack[0].HasPrimitive('OptionsUpdatePanelBG'): #create the BG
layer[0].stack[0].AddQuad('OptionsUpdatePanelBG',
[0.,0.],[W,0.],[W,H-20.],[0.,H-20.], (127,127,127,200))
else: #create new or modify existing decals
Y = 20.
if __Button(0.,Y-20.,W-21.,Y,"OptionsPanelToggle","","Options",(230,230,230,200)):
if not OptionsUpdatePanelExpensionState: #open panel
OptionsUpdatePanelExpensionState=True
OptionsUpdatePanelButton=0
__OpenOptionsUpdatePanel(W,H,hw)
__DrawOptionsData(W,H,hw)
elif OptionsUpdatePanelButton==0: #close panel
OptionsUpdatePanelExpensionState=False
__RemoveOptionsData()
__CloseOptionsUpdatePanel(W,H,hw)
else: #switch to options
OptionsUpdatePanelButton=0
__RemoveUpdateData()
__DrawOptionsData(W,H,hw)
__EndButton("OptionsPanelToggle")
if __Button(W-20.,Y-20.,W,Y,"UpdatePanelToggle","","Updates",(230,230,230,200)):
if not OptionsUpdatePanelExpensionState: #open panel
OptionsUpdatePanelExpensionState=True
OptionsUpdatePanelButton=1
__OpenOptionsUpdatePanel(W,H,hw)
__DrawUpdateData(W,H,hw)
elif OptionsUpdatePanelButton==1: #close panel
OptionsUpdatePanelExpensionState=False
__RemoveUpdateData()
__CloseOptionsUpdatePanel(W,H,hw)
else: #switch to update
OptionsUpdatePanelButton=1
__RemoveOptionsData()
__DrawUpdateData(W,H,hw)
__EndButton("UpdatePanelToggle")
return PES
#-----------------------------------
def __RemoveExPanel(Na,priority=0):
global Widgets,layer
try:
Widgets[Na].hitdef.enabled=False #disable the hitdef (save the state)
ebna='EButton%sQuad'%Na
ebdna='EButton%sDecal'%Na
p2 = priority+2
p3 = priority+3
if layer[0].stack[p2].HasPrimitive(ebna):
layer[0].stack[p2].RemovePrimitive(ebna)
layer[0].stack[p3].RemovePrimitive(ebdna)
if layer[0].stack[priority].HasPrimitive(Na): layer[0].stack[priority+0].RemovePrimitive(Na)
except: pass
def __ExPanel(X1,Y1,X2,Y2,EB,Na,MX=0.,MY=0.,St=True,priority=0): #returns current state for other panels
#MX and XY are for outside influence on the toggle button
global Widgets,layer #,pw,ph
global AllowHitUpdates
#minor pre-calculations
sx=X2-X1; sy=Y2-Y1
#verify the widget exists
try: W=Widgets[Na]
except KeyError:
Widgets[Na]=__Widget()
W=Widgets[Na]
W.info=St #toggle state
#update the HitDef if changed by an outside function
if AllowHitUpdates!=W.hitdef.enabled: W.hitdef.enabled=AllowHitUpdates
S=W.info
#60x15px rectangle calculations (toggle button)
if (EB==0 and S) or (EB==2 and not S): #top
xpos=X1+(sx/2)+MX
EBX1,EBY1,EBX2,EBY2=xpos-30.,Y1,xpos+30.,Y1+15.
TPX1,TPY1,TPX2,TPY2,TPX3,TPY3=xpos,Y1+10.,xpos-5.,Y1+5.,xpos+5.,Y1+5.
elif (EB==1 and S) or (EB==3 and not S): #right
ypos=Y1+(sy/2)+MY
EBX1,EBY1,EBX2,EBY2=X2-15.,ypos-30.,X2,ypos+30.
TPX1,TPY1,TPX2,TPY2,TPX3,TPY3=X2-10.,ypos,X2-5.,ypos-5.,X2-5.,ypos+5.
elif (EB==2 and S) or (EB==0 and not S): #bottom
xpos=X1+(sx/2)+MX
EBX1,EBY1,EBX2,EBY2=xpos-30.,Y2-15.,xpos+30.,Y2
TPX1,TPY1,TPX2,TPY2,TPX3,TPY3=xpos,Y2-10.,xpos+5.,Y2-5.,xpos-5.,Y2-5.
elif (EB==3 and S) or (EB==1 and not S): #left
ypos=Y1+(sy/2)+MY
EBX1,EBY1,EBX2,EBY2=X1,ypos-30.,X1+15.,ypos+30.
TPX1,TPY1,TPX2,TPY2,TPX3,TPY3=X1+10.,ypos,X1+5.,ypos-5.,X1+5.,ypos+5.
#drawing data:
ebna='EButton%sQuad'%Na
ebdna='EButton%sDecal'%Na
p2 = priority+2
p3 = priority+3
if not layer[0].stack[p2].HasPrimitive(ebna): #don't draw if we already have
layer[0].stack[p2].AddQuad(ebna,[EBX1,EBY1],[EBX2,EBY1],[EBX2,EBY2],[EBX1,EBY2],(95,95,95,200))
layer[0].stack[p3].AddTri(ebdna,[TPX1,TPY1],[TPX2,TPY2],[TPX3,TPY3],(63,63,63,180))
if S: #is the panel expanded?
if not layer[0].stack[priority].HasPrimitive(Na): #add the BG
layer[0].stack[priority].AddQuad(Na,[X1,Y1],[X2,Y1],[X2,Y2],[X1,Y2],(127,127,127,200))
else: #remove the BG
if layer[0].stack[priority].HasPrimitive(Na):
layer[0].stack[priority].RemovePrimitive(Na)
B = layer[0].stack[p2].primitives[ebna]
BD = layer[0].stack[p3].primitives[ebdna]
#Positioning Verification
if B.Position([EBX1,EBY1],[EBX2,EBY1],[EBX2,EBY2],[EBX1,EBY2]):
BD.Position([TPX1,TPY1],[TPX2,TPY2],[TPX3,TPY3])
if layer[0].stack[priority].HasPrimitive(Na):
layer[0].stack[priority].primitives[Na].Position([X1,Y1],[X2,Y1],[X2,Y2],[X1,Y2])
#HitDef
if W.hitdef.x!=EBX1: W.hitdef.x=EBX1; W.hitdef.X=EBX2
if W.hitdef.y!=EBY1: W.hitdef.y=EBY1; W.hitdef.Y=EBY2
#Widget logic
if W.event.hasFocus: #change the color if the mouse is over the button
if W.event.clickL or W.event.holdL: #change the color if the button is clicked or held
B.Color(79,79,79,200)
else: B.Color(87,87,87,200)
else: B.Color(95,95,95,200)
if W.event.releaseL: W.info=not W.info
return S
#-----------------------------------
#input functions:
def __FrameCheck(): #where most of the widget-event state-logic happens.
#the functions below simply handle base-state functions
#a frame must pass before the base state can be reverted (where this function comes in)
global Widgets,motionx,motiony,movementx,movementy
for WN in Widgets:
W=Widgets[WN]
#check for a click event: (transfer click to hold)
if W.event.clickL: W.event.clickL=False; W.event.holdL=True
if W.event.clickM: W.event.clickM=False; W.event.holdM=True
if W.event.clickR: W.event.clickR=False; W.event.holdR=True
#check for a release event: (disable the hold-state)
if W.event.releaseL: W.event.releaseL=False; W.event.holdL=False
if W.event.releaseM: W.event.releaseM=False; W.event.holdM=False
if W.event.releaseR: W.event.releaseR=False; W.event.holdR=False
#check for a scroll event:
if W.event.scrollU: W.event.scrollU=False
if W.event.scrollD: W.event.scrollD=False
#check for a key press event: (transfer press to hold)
if W.event.keyPress: W.event.keyPress=False; W.event.keyHold=True
#check for a key release event: (disable the hold-state)
if W.event.keyRelease: W.event.keyRelease=False; W.event.keyHold=False; W.key=None
#check for a mouse-focus event:
if W.event.gainFocus: W.event.gainFocus=False; W.event.hasFocus=True
if W.event.loseFocus:
W.event.loseFocus=False
W.event.hasFocus=False
#don't remember click events if we lose focus
W.event.holdL=False
W.event.holdM=False
W.event.holdR=False
#check for mouse drag and reset:
if motionx!=None: motionx=None
if motiony!=None: motiony=None
if movementx!=None: movementx=None
if movementy!=None: movementy=None
doFrameCheck=False
def __Click(b,x,y):
global Widgets,doFrameCheck
first=True
for WN in Widgets:
W=Widgets[WN]; HD=W.hitdef
if HD.enabled:
X1,Y1,X2,Y2=HD.x,HD.y,HD.X,HD.Y
if X1<x<X2 and Y1<y<Y2 and first: # first enabled Widget clicked
if b==1: W.event.clickL=True; doFrameCheck=True
if b==2: W.event.clickM=True; doFrameCheck=True
if b==3: W.event.clickR=True; doFrameCheck=True
#scrolling:
if b==4: W.event.scrollU=True; doFrameCheck=True
if b==5: W.event.scrollD=True; doFrameCheck=True
first=False #only perform operations on the first widget
def __Release(b,x,y):
global Widgets,doFrameCheck,noRelease
first=True
for WN in Widgets:
W=Widgets[WN]; HD=W.hitdef
if HD.enabled:
X1,Y1,X2,Y2=HD.x,HD.y,HD.X,HD.Y
if X1<x<X2 and Y1<y<Y2 and first: # first enabled Widget released
if b==1: W.event.releaseL=True; doFrameCheck=True
if b==2: W.event.releaseM=True; doFrameCheck=True
if b==3: W.event.releaseR=True; doFrameCheck=True
#scrolling is managed by "__FrameCheck", so it's not needed here
first=False
def __Motion(b,x,y,rx,ry):
#rx,ry - the number of pixels the mouse has moved (float values)
 global Widgets,doFrameCheck,motionx,motiony,movementx,movementy
motionx,motiony=rx,ry
movementx,movementy=x,y
_x=x; _y=y
for WN in Widgets:
W=Widgets[WN]; HD=W.hitdef
if HD.enabled:
X1,Y1,X2,Y2=HD.x,HD.y,HD.X,HD.Y
if X1<_x<X2 and Y1<_y<Y2: W.event.gainFocus=True; doFrameCheck=True
else:
if W.event.hasFocus: W.event.loseFocus=True; doFrameCheck=True
def __KeyPress(k):
global Widgets,doFrameCheck
for WN in Widgets:
W=Widgets[WN]
if W.allowKeys:
W.event.keyPress=True; doFrameCheck=True
W.key=k
def __KeyRelease(k):
global Widgets,doFrameCheck
for WN in Widgets:
W=Widgets[WN]
if W.allowKeys:
W.event.keyRelease=True; doFrameCheck=True
W.key=k
#-----------------------------------
#main functions:
def __ResizeGUI(w,h):
global layer
for lid in layer: layer[lid].clear()
__initGUI()
showHitDefs=0
def __DrawGUI(W,H,RotMatrix): #called directly by the display function after drawing the scene
global layer
#the GUI is drawn over the scene by clearing the depth buffer
#verify we have the needed stack layers for drawing
#layer[0].stack[0] is used for basic BG drawing
try: layer[0].stack[1] #used for sub-BG widgets such as scroll-boxes
except KeyError: layer[0].AddStack()
try: layer[0].stack[2] #used for basic overlay widgets such as buttons
except KeyError: layer[0].AddStack()
try: layer[0].stack[3] #used for special decals drawn on overlay widgets
except KeyError: layer[0].AddStack()
# overlay (side panels)
try: layer[0].stack[4] #reserved
except KeyError: layer[0].AddStack()
try: layer[0].stack[5] #basic BG drawing
except KeyError: layer[0].AddStack()
try: layer[0].stack[6] #used for sub-BG widgets such as scroll-boxes
except KeyError: layer[0].AddStack()
try: layer[0].stack[7] #used for basic overlay widgets such as buttons
except KeyError: layer[0].AddStack()
try: layer[0].stack[8] #used for special decals drawn on overlay widgets
except KeyError: layer[0].AddStack()
try: layer[0].overlay[1] #used for the selection box menus to overlay the widget fonts
except KeyError: layer[0].AddOverlay()
__GL.glMatrixMode(__GL.GL_PROJECTION)
__GL.glLoadIdentity()
__GL.glOrtho(0.0, W, H, 0.0, -50, 50)
#glOrtho(GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble near, GLdouble far)
__GL.glMatrixMode(__GL.GL_MODELVIEW)
__GL.glLoadIdentity()
__GL.glClear( __GL.GL_DEPTH_BUFFER_BIT )
__GL.glDisable(__GL.GL_TEXTURE_2D)
__GL.glDisable(__GL.GL_LIGHTING)
M,A,D,C=False,False,False,False
if __OptionsUpdatePanel(W,H):
__RemoveModelPanel(5)
__RemoveExPanel('MODEL',5)
__RemoveAnimPanel(5)
__RemoveExPanel('ANIM',5)
__RemoveDisplayPanel(0)
__RemoveExPanel('DSPL',0)
__RemoveControlPanel(0)
__RemoveExPanel('CTRL',0)
if layer[0].stack[5].HasPrimitive('clear1'):
layer[0].stack[5].RemovePrimitive('clear1')
layer[0].stack[5].RemovePrimitive('clear2')
else:
M = __ExPanel(0.,21.,210.,H,1.,'MODEL',0.,-11.,priority=5)
if M:
__ModelPanel(W,H,5)
layer[0].stack[5].AddQuad('clear1',[210.,21.],[211.,21.],[211.,H],[210.,H])
else:
__RemoveModelPanel(5)
if layer[0].stack[5].HasPrimitive('clear1'):
layer[0].stack[5].RemovePrimitive('clear1')
A = __ExPanel(W-210.,21.,W,H,3.,'ANIM',0.,-11.,priority=5)
if A:
__AnimPanel(W,H,5)
layer[0].stack[5].AddQuad('clear2',[W-211.,21.],[W-210.,21.],[W-210.,H],[W-211.,H])
else:
__RemoveAnimPanel(5)
if layer[0].stack[5].HasPrimitive('clear2'):
layer[0].stack[5].RemovePrimitive('clear2')
DCX1,DCX2=210. if M else 0.,W-210. if A else W
D = __ExPanel(211. if M else 0.,21.,W-211. if A else W,150.,2.,'DSPL',(0. if M else 105.)+(0. if A else -105.))
if D: __DisplayPanel(W,H,DCX1,DCX2,0)
else: __RemoveDisplayPanel(0)
C = __ExPanel(211. if M else 0.,H-150.,W-211. if A else W,H,0.,'CTRL',(0. if M else 105.)+(0. if A else -105.))
if C: __ControlPanel(W,H,DCX1,DCX2,0)
else: __RemoveControlPanel(0)
__GL.glDisable(__GL.GL_BLEND)
#axis
#TODO: use a GL display list for this:
__GL.glLineWidth(1.0)
__GL.glPushMatrix()
__GL.glTranslatef(228. if M else 17.,H-(167. if C else 17.),0.)
__GL.glScalef(600,600,1)
__GL.glMultMatrixf(RotMatrix)
__GL.glColor3f(1.0,0.0,0.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.02,0.0,0.0); __GL.glEnd() #X
__GL.glTranslatef(0.0145,0.0,0.0); __GL.glRotatef(90, 0.0, 1.0, 0.0)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glRotatef(-90, 0.0, 1.0, 0.0); __GL.glTranslatef(-0.0145,0.0,0.0)
__GL.glColor3f(0.0,1.0,0.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.0,-0.02,0.0); __GL.glEnd() #Y
__GL.glTranslatef(0.0,-0.0145,0.0); __GL.glRotatef(90, 1.0, 0.0, 0.0)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glRotatef(-90, 1.0, 0.0, 0.0); __GL.glTranslatef(0.0,0.0145,0.0)
__GL.glColor3f(0.0,0.0,1.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.0,0.0,0.02); __GL.glEnd() #Z
__GL.glTranslatef(0.0,0.0,0.0145)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glTranslatef(0.0,0.0,-0.0145)
__GL.glColor3f(0.5,0.5,0.5) ; #__GLUT.glutSolidSphere(0.003, 8, 4)
__GL.glPopMatrix()
__GL.glEnable(__GL.GL_BLEND)
for lid in layer:
l=layer[lid]
__GL.glEnable(__GL.GL_DEPTH_TEST)
sl=len(l.stack)-1
#print 'layer[%i]:'%lid
#print '%i stack layers:'%(sl+1)
  for sid in l.stack: #the hash order should already order as 0+ (0 being the BG, [-1] the FG)
#reverse the order (draw FG first at highest point then draw BG(s) behind FG)
n=sl-sid
d=(n)*.01
d2=(n+.5)*.01
sprimitives=l.stack[n].primitives
sstrings=l.stack[n].strings
#print ' stack[%i] - %i primitives:'%(n,len(sprimitives))
for spname in sprimitives:
#print ' "%s"'%spname
p=sprimitives[spname] #localize the primitive for easy reference
__GL.glColor4f(p.r,p.g,p.b,p.a)
if p.isTri: __GL.glBegin(__GL.GL_TRIANGLES); __GL.glVertex3fv(p.v1+[d]); __GL.glVertex3fv(p.v2+[d]); __GL.glVertex3fv(p.v3+[d]); __GL.glEnd()
else: __GL.glBegin(__GL.GL_QUADS); __GL.glVertex3fv(p.v1+[d]); __GL.glVertex3fv(p.v2+[d]); __GL.glVertex3fv(p.v3+[d]); __GL.glVertex3fv(p.v4+[d]); __GL.glEnd()
__GL.glEnable(__GL.GL_TEXTURE_2D)
#print ' font[%i] - %i strings:'%(oid,len(strings))
for ssname in sstrings:
#print ' "%s"'%sname
s=sstrings[ssname] #localize
if s.enabled:
__GL.glLoadIdentity()
#__GL.glPushMatrix()
__GL.glColor4f(s.r,s.g,s.b,s.a)
__GL.glTranslatef(s.x,s.y,d2)
__GL.glScalef(s.s,s.s,s.s)
xpos = 0.0
for list,w,h in s.lists:
if list!=None:
__GL.glTranslatef(xpos,0.0,0.0)
__GL.glCallList(list)
xpos=w+1
#print (s.x+xpos,s.y)
__GL.glLoadIdentity()
#__GL.glPopMatrix()
__GL.glDisable(__GL.GL_TEXTURE_2D)
__GL.glDisable(__GL.GL_DEPTH_TEST)
  #print '%i overlay/font layers:'%len(l.overlay)
for oid in l.overlay:
oprimitives=l.overlay[oid].primitives
#print ' overlay[%i] - %i primitives:'%(oid,len(oprimitives))
for opname in oprimitives:
#print ' "%s"'%opname
p=oprimitives[opname] #localize the primitive for easy reference
__GL.glColor4f(p.r,p.g,p.b,p.a)
if p.isTri: __GL.glBegin(__GL.GL_TRIANGLES); __GL.glVertex2fv(p.v1); __GL.glVertex2fv(p.v2); __GL.glVertex2fv(p.v3); __GL.glEnd()
else: __GL.glBegin(__GL.GL_QUADS); __GL.glVertex2fv(p.v1); __GL.glVertex2fv(p.v2); __GL.glVertex2fv(p.v3); __GL.glVertex2fv(p.v4); __GL.glEnd()
__GL.glEnable(__GL.GL_TEXTURE_2D)
ostrings=l.overlay[oid].strings
#print ' font[%i] - %i strings:'%(oid,len(strings))
for osname in ostrings:
#print ' "%s"'%sname
s=ostrings[osname] #localize
if s.enabled:
__GL.glLoadIdentity()
#__GL.glPushMatrix()
__GL.glColor4f(s.r,s.g,s.b,s.a)
__GL.glTranslatef(s.x,s.y,0.0)
__GL.glScalef(s.s,s.s,s.s)
xpos = 0.0
for list,w,h in s.lists:
if list!=None:
__GL.glTranslatef(xpos,0.0,0.0)
__GL.glCallList(list)
xpos=w+1
#print (s.x+xpos,s.y)
__GL.glLoadIdentity()
#__GL.glPopMatrix()
__GL.glDisable(__GL.GL_TEXTURE_2D)
#raw_input()
#for debugging: (draw the active HitDefs)
global showHitDefs,Widgets
if showHitDefs:
__GL.glDisable(__GL.GL_BLEND)
for wname in Widgets:
W=Widgets[wname]
HD=W.hitdef
if HD.enabled:
__GL.glLineWidth(1.5)
x=HD.x; y=HD.y; X=HD.X; Y=HD.Y
__GL.glBegin(__GL.GL_LINE_LOOP)
__GL.glColor3f(1 if W.event.hasFocus else 0,1,0) #hitdef will be yellow if focused on
__GL.glVertex2f(x,y); __GL.glVertex2f(X,y)
__GL.glVertex2f(X,Y); __GL.glVertex2f(x,Y)
__GL.glEnd()
__GL.glLineWidth(1.0)
global doFrameCheck
if doFrameCheck: __FrameCheck(); doFrameCheck=False
def __initGUI():
global char
__pyg.font.init()
#__GL.glTexEnvf( __GL.GL_TEXTURE_ENV, __GL.GL_TEXTURE_ENV_MODE, __GL.GL_MODULATE )
F=__pyg.font.Font('fonts/tahoma.ttf',18) #don't use .fon files
def CreateCharacter(s,F):
try:
#data = __pyg.image.tostring(F.render(s, 1, (255,255,255)), 'RGBA', 1)
data = F.render(s, 1, (255,255,255)).get_buffer().raw
data_w, data_h = F.size(s)
except: return None,0,0
__GL.glEnable(__GL.GL_TEXTURE_2D)
texture = __GL.glGenTextures(1)
__GL.glBindTexture(__GL.GL_TEXTURE_2D, texture)
__GL.glTexParameteri(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_MAG_FILTER, __GL.GL_LINEAR)
__GL.glTexParameteri(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_MIN_FILTER, __GL.GL_LINEAR)
__GL.glTexImage2D(
__GL.GL_TEXTURE_2D, # target texture type
    0, # mipmap level (base image)
__GL.GL_RGBA, # texture format
data_w, data_h,
0, #border
__GL.GL_RGBA, # input pixel format
__GL.GL_UNSIGNED_BYTE, # input data format
data) # pixel data
__GL.glDisable(__GL.GL_TEXTURE_2D)
###
list = __GL.glGenLists(1)
__GL.glNewList( list, __GL.GL_COMPILE )
__GL.glBindTexture(__GL.GL_TEXTURE_2D, texture)
__GL.glBegin(__GL.GL_QUADS)
__GL.glTexCoord2f(0.0, 0.0); __GL.glVertex2f(0.0, 0.0)
__GL.glTexCoord2f(0.0, 1.0); __GL.glVertex2f(0.0, data_h)
__GL.glTexCoord2f(1.0, 1.0); __GL.glVertex2f(data_w, data_h)
__GL.glTexCoord2f(1.0, 0.0); __GL.glVertex2f(data_w, 0.0)
__GL.glEnd()
__GL.glEndList()
return list, data_w, data_h
char = {chr(c): CreateCharacter(chr(c),F) for c in range(256)} # list,w,h = char['0']
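 #usage sketch (assumption, mirroring the string-drawing loops in __DrawGUI above): each entry maps a
 #character to (display_list, width, height), and a string is drawn by calling each glyph's list and
 #translating by width+1 between glyphs, e.g. lst,w,h = char['A']; glCallList(lst); glTranslatef(w+1,0,0)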
|
|
from ctypes import *
import os
# Function definitions for C api
buffer_operation_func_t = CFUNCTYPE(c_int32, POINTER(c_uint8), c_uint32)
check_operation_func_t = CFUNCTYPE(c_int32)
# transport struct definition for C api
class TM_transport(Structure):
_fields_ = [("read", buffer_operation_func_t),
("readable", check_operation_func_t),
("write", buffer_operation_func_t),
("writeable", check_operation_func_t) ]
# user data struct for the C api (passed back to the frame callback as `state`)
class TM_state(Structure):
_fields_ = [("count", c_uint32)]
class TM_msg(Structure):
_fields_ = [("type", c_uint32),# WARNING : No way of telling it's actually uin32
("topic", c_char_p),
("buffer", POINTER(c_uint8)),
("size", c_uint32)]
on_frame_callback_t = CFUNCTYPE(None,POINTER(TM_state),POINTER(TM_msg))
class TelemetryCBinding:
"""
C API Abstraction over the C binding protocol implementation
"""
def __init__(self, transport, on_frame_callback):
self.transport = transport
self.on_frame_callback = on_frame_callback
lib_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),'telemetry','telemetry.dll')
lib_crc16 = os.path.join(os.path.dirname(os.path.dirname(__file__)),'telemetry','crc16.dll')
lib_framing = os.path.join(os.path.dirname(os.path.dirname(__file__)),'telemetry','framing.dll')
self.crc16 = CDLL(lib_crc16)
self.framing = CDLL(lib_framing)
self.api = CDLL(lib_path)
# Interface types definition
self.api.init_telemetry.argtypes = [POINTER(TM_state),POINTER(TM_transport)]
self.api.publish.argtypes = [c_char_p, c_char_p]
self.api.publish_u8.argtypes = [c_char_p, c_uint8]
self.api.publish_u16.argtypes = [c_char_p, c_uint16]
self.api.publish_u32.argtypes = [c_char_p, c_uint32]
self.api.publish_i8.argtypes = [c_char_p, c_int8]
self.api.publish_i16.argtypes = [c_char_p, c_int16]
self.api.publish_i32.argtypes = [c_char_p, c_int32]
self.api.publish_f32.argtypes = [c_char_p, c_float]
self.api.update_telemetry.argtypes = [c_float]
self.api.emplace.argtypes = [POINTER(TM_msg),c_char_p,c_uint32]
self.api.emplace_u8.argtypes = [POINTER(TM_msg),POINTER(c_uint8)]
self.api.emplace_u16.argtypes = [POINTER(TM_msg),POINTER(c_uint16)]
self.api.emplace_u32.argtypes = [POINTER(TM_msg),POINTER(c_uint32)]
self.api.emplace_i8.argtypes = [POINTER(TM_msg),POINTER(c_int8)]
self.api.emplace_i16.argtypes = [POINTER(TM_msg),POINTER(c_int16)]
self.api.emplace_i32.argtypes = [POINTER(TM_msg),POINTER(c_int32)]
self.api.emplace_f32.argtypes = [POINTER(TM_msg),POINTER(c_float)]
# Storing closures for C API callback
# See http://stackoverflow.com/questions/7259794/how-can-i-get-methods-to-work-as-callbacks-with-python-ctypes
self.__read = self.__get_read_cb()
self.__write = self.__get_write_cb()
self.__readable = self.__get_readable_cb()
self.__writeable = self.__get_writeable_cb()
self.__on_frame = self.__get_on_frame_cb()
# api initialization - store t and u to avoid garbage collection
self.t = TM_transport(self.__read,self.__readable,self.__write,self.__writeable)
self.u = TM_state(0)
self.api.init_telemetry(byref(self.u),byref(self.t))
self.api.subscribe(self.__on_frame)
def update(self):
self.api.update_telemetry(0)
def publish(self, topic, data, datatype):
"""
"""
if datatype == 'string':
self.api.publish(topic.encode(encoding='ascii'),data.encode(encoding='ascii'))
elif datatype == 'uint8':
self.api.publish_u8(topic.encode(encoding='ascii'), data)
elif datatype == 'uint16':
self.api.publish_u16(topic.encode(encoding='ascii'), data)
elif datatype == 'uint32':
self.api.publish_u32(topic.encode(encoding='ascii'), data)
elif datatype == 'int8':
self.api.publish_i8(topic.encode(encoding='ascii'), data)
elif datatype == 'int16':
self.api.publish_i16(topic.encode(encoding='ascii'), data)
elif datatype == 'int32':
self.api.publish_i32(topic.encode(encoding='ascii'), data)
elif datatype == 'float32':
self.api.publish_f32(topic.encode(encoding='ascii'), data)
def __get_on_frame_cb(self):
def on_frame(state,msg):
topic = msg.contents.topic.decode(encoding='utf-8')
payload = None
# cast buffer to string
if msg.contents.type == 7 :
# Create a char * (+ 1 to have enough space)
cbuf = create_string_buffer(msg.contents.size + 1)
# Use api to format data correctly
self.api.emplace(msg,cbuf,msg.contents.size + 1)
# Convert bytes code to utf-8
payload = cbuf.value.decode('utf-8')
# cast buffer to uint8
elif msg.contents.type == 1 :
cbuf = c_uint8()
# Use api to format data correctly
self.api.emplace_u8(msg,byref(cbuf))
# Store decoded data
payload = cbuf.value
# cast buffer to uint16
elif msg.contents.type == 2 :
cbuf = c_uint16()
# Use api to format data correctly
self.api.emplace_u16(msg,byref(cbuf))
# Store decoded data
payload = cbuf.value
# cast buffer to uint32
elif msg.contents.type == 3 :
cbuf = c_uint32()
# Use api to format data correctly
self.api.emplace_u32(msg,byref(cbuf))
# Store decoded data
payload = cbuf.value
# cast buffer to int8
elif msg.contents.type == 4 :
cbuf = c_int8()
# Use api to format data correctly
self.api.emplace_i8(msg,byref(cbuf))
# Store decoded data
payload = cbuf.value
# cast buffer to int16
elif msg.contents.type == 5 :
cbuf = c_int16()
# Use api to format data correctly
self.api.emplace_i16(msg,byref(cbuf))
# Store decoded data
payload = cbuf.value
# cast buffer to int32
elif msg.contents.type == 6 :
# Create a int32
cbuf = c_int32()
# Use api to format data correctly
self.api.emplace_i32(msg,byref(cbuf))
# Store decoded data
payload = cbuf.value
# cast buffer to float32
elif msg.contents.type == 0 :
            # Create a float32
cbuf = c_float()
# Use api to format data correctly
self.api.emplace_f32(msg,byref(cbuf))
# Store decoded data
payload = cbuf.value
self.on_frame_callback(topic,payload)
return on_frame_callback_t(on_frame)
def __get_read_cb(self):
def read(uint8_ptr, data_size):
# Read the data
data = self.transport.read(maxbytes=data_size)
if len(data) > data_size:
return 0
for i in range(len(data)):
uint8_ptr[i] = data[i]
return len(data)
return buffer_operation_func_t(read)
def __get_write_cb(self):
def write(uint8_t_ptr, data_size):
data = []
for i in range(data_size):
data.append(uint8_t_ptr[i])
self.transport.write(data)
return 0
return buffer_operation_func_t(write)
def __get_readable_cb(self):
def readable():
return self.transport.readable()
return check_operation_func_t(readable)
def __get_writeable_cb(self):
def writeable():
return self.transport.writeable()
return check_operation_func_t(writeable)
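# Minimal usage sketch (assumptions: the telemetry/crc16/framing DLLs referenced above are built, and
# `ser` is a pyserial-like object; `SerialTransport` is hypothetical, not part of this module):
#
#   class SerialTransport(object):
#       def __init__(self, ser): self.ser = ser
#       def read(self, maxbytes=1): return bytearray(self.ser.read(maxbytes))
#       def write(self, data): self.ser.write(bytearray(data))
#       def readable(self): return 1 if self.ser.in_waiting else 0
#       def writeable(self): return 1
#
#   def on_frame(topic, payload):
#       print topic, payload
#
#   tm = TelemetryCBinding(SerialTransport(ser), on_frame)
#   tm.publish('throttle', 0.42, 'float32')   # send a float32 sample on topic 'throttle'
#   tm.update()                               # pump the C library: read pending frames, invoke on_frame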
|
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import sys
import threading
import traceback
import IECore
import Gaffer
import GafferDispatch
import GafferUI
## A dialogue which can be used to dispatch tasks
class DispatchDialogue( GafferUI.Dialogue ) :
## Defines what happens when the tasks have been successfully dispatched :
#
# Close : The dialogue is closed immediately.
#
# Confirm : The dialogue remains open confirming success, with a button for returning to the editing state.
PostDispatchBehaviour = IECore.Enum.create( "Close", "Confirm" )
__dispatchDialogueMenuDefinition = None
def __init__( self, tasks, dispatchers, nodesToShow, postDispatchBehaviour=PostDispatchBehaviour.Confirm, title="Dispatch Tasks", sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
self._getWidget().setBorderStyle( GafferUI.Frame.BorderStyle.None_ )
self.__dispatchers = dispatchers
self.__tasks = tasks
self.__nodesToShow = nodesToShow
self.__script = tasks[0].scriptNode()
# hold a reference to the script window so plugs which launch child windows work properly.
# this is necessary for PlugValueWidgets like color swatches and ramps. Ideally those widgets
# wouldn't rely on the existence of a ScriptWindow and we could drop this acquisition.
self.__scriptWindow = GafferUI.ScriptWindow.acquire( self.__script )
self.__postDispatchBehaviour = postDispatchBehaviour
# build tabs for all the node, dispatcher, and context settings
with GafferUI.ListContainer() as self.__settings :
mainMenu = GafferUI.MenuBar( self.menuDefinition() )
mainMenu.setVisible( False )
with GafferUI.TabbedContainer() as self.__tabs :
for node in self.__nodesToShow :
nodeFrame = GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None_, borderWidth=0 )
nodeFrame.addChild( self.__nodeEditor( node ) )
# remove the per-node execute button
Gaffer.Metadata.registerValue( node, "layout:customWidget:dispatchButton:widgetType", "", persistent = False )
self.__tabs.setLabel( nodeFrame, node.relativeName( self.__script ) )
with GafferUI.ListContainer() as dispatcherTab :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=2, borderWidth=4 ) as dispatcherMenuColumn :
GafferUI.Label( "<h4>Dispatcher</h4>" )
self.__dispatchersMenu = GafferUI.MultiSelectionMenu( allowMultipleSelection = False, allowEmptySelection = False )
self.__dispatchersMenu.append( [ x.getName() for x in self.__dispatchers ] )
self.__dispatchersMenu.setSelection( [ self.__dispatchers[0].getName() ] )
self.__dispatchersMenuChanged = self.__dispatchersMenu.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__dispatcherChanged ) )
dispatcherMenuColumn.setVisible( len(self.__dispatchers) > 1 )
self.__dispatcherFrame = GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None_, borderWidth=0 )
self.__tabs.setLabel( dispatcherTab, "Dispatcher" )
with GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None_, borderWidth=4 ) as contextTab :
GafferUI.PlugValueWidget.create( self.__script["variables"] )
self.__tabs.setLabel( contextTab, "Context Variables" )
# build a ui element for progress feedback and messages
with GafferUI.ListContainer( spacing = 4 ) as self.__progressUI :
with GafferUI.ListContainer( parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center, "verticalAlignment" : GafferUI.VerticalAlignment.Center } ) :
self.__progressIconFrame = GafferUI.Frame( borderStyle = GafferUI.Frame.BorderStyle.None_, parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center } )
self.__progressLabel = GafferUI.Label( parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center } )
with GafferUI.Collapsible( "Details", collapsed = True, parenting = { "expand" : True } ) as self.__messageCollapsible :
self.__messageWidget = GafferUI.MessageWidget()
# connect to the collapsible state change so we can increase the window
# size when the details pane is first shown.
self.__messageCollapsibleConneciton = self.__messageCollapsible.stateChangedSignal().connect( Gaffer.WeakMethod( self.__messageCollapsibleChanged ) )
self.__backButton = self._addButton( "Back" )
self.__backButtonConnection = self.__backButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateSettings ) )
self.__primaryButton = self._addButton( "Dispatch" )
self.__setDispatcher( dispatchers[0] )
self.__initiateSettings( self.__primaryButton )
@staticmethod
def createWithDefaultDispatchers( tasks, nodesToShow, defaultDispatcherType=None, postDispatchBehaviour=PostDispatchBehaviour.Confirm, title="Dispatch Tasks", sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :
defaultType = defaultDispatcherType if defaultDispatcherType else GafferDispatch.Dispatcher.getDefaultDispatcherType()
dispatcherTypes = list(GafferDispatch.Dispatcher.registeredDispatchers())
if defaultType and defaultType in dispatcherTypes :
dispatcherTypes.remove( defaultType )
dispatcherTypes.insert( 0, defaultType )
dispatchers = []
for key in dispatcherTypes :
dispatcher = GafferDispatch.Dispatcher.create( key )
Gaffer.NodeAlgo.applyUserDefaults( dispatcher )
dispatchers.append( dispatcher )
return DispatchDialogue( tasks, dispatchers, nodesToShow, postDispatchBehaviour=postDispatchBehaviour, title = title, sizeMode = sizeMode, **kw )
def scriptNode( self ) :
return self.__script
def setVisible( self, visible ) :
if visible :
# See comment in `GafferUI.NodeSetEditor.acquire()`
self._qtWidget().resize( 400, 400 )
GafferUI.Window.setVisible( self, visible )
## Returns an IECore.MenuDefinition which is used to define the keyboard shortcuts for all DispatchDialogues.
# This can be edited at any time to modify subsequently created DispatchDialogues.
# Typically editing would be done as part of gaffer startup. Note that this menu is never shown to users,
# but we need it in order to register keyboard shortcuts.
@classmethod
def menuDefinition( cls ) :
if cls.__dispatchDialogueMenuDefinition is None :
cls.__dispatchDialogueMenuDefinition = IECore.MenuDefinition()
return cls.__dispatchDialogueMenuDefinition
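	# Illustrative use (assumption - typically done from a gaffer startup file, before any dialogue
	# is created; `someUndoFunction` is hypothetical):
	#
	#   DispatchDialogue.menuDefinition().append(
	#       "/Edit/Undo", { "command" : someUndoFunction, "shortCut" : "Ctrl+Z" }
	#   )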
def __nodeEditor( self, node ) :
editor = GafferUI.NodeEditor( self.__script )
editor.setNodeSet( Gaffer.StandardSet( [ node ] ) )
## \todo: Expose public API for the NodeEditor's NameWidget visibility
editor._NodeEditor__nameWidget.setVisible( False )
editor._NodeEditor__nameWidget.parent()[0].setVisible( False )
return editor
def __setDispatcher( self, dispatcher ) :
self.__currentDispatcher = dispatcher
self.__dispatcherFrame.setChild( self.__nodeEditor( self.__currentDispatcher ) )
def __dispatcherChanged( self, menu ) :
for dispatcher in self.__dispatchers :
if dispatcher.getName() == menu.getSelection()[0] :
self.__setDispatcher( dispatcher )
return
def __initiateSettings( self, button ) :
self.__backButton.setEnabled( False )
self.__backButton.setVisible( False )
self.__primaryButton.setText( "Dispatch" )
self.__primaryButton.setEnabled( True )
self.__primaryButton.setVisible( True )
self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateDispatch ) )
self.__tabs.setCurrent( self.__tabs[0] )
self._getWidget().setChild( self.__settings )
def __initiateDispatch( self, button ) :
self.__progressIconFrame.setChild( GafferUI.BusyWidget() )
self.__progressLabel.setText( "<h3>Dispatching...</h3>" )
self.__backButton.setVisible( False )
self.__backButton.setEnabled( False )
self.__primaryButton.setVisible( False )
self.__primaryButton.setEnabled( False )
self.__messageWidget.clear()
self.__messageCollapsible.setCollapsed( True )
self._getWidget().setChild( self.__progressUI )
threading.Thread( target = self.__dispatch ).start()
def __dispatch( self ) :
try :
with self.__messageWidget.messageHandler() :
with self.__script.context() :
self.__currentDispatcher.dispatch( self.__tasks )
result = 0
except Exception as e :
result = sys.exc_info()
GafferUI.EventLoop.executeOnUIThread( functools.partial( self.__finish, result ) )
def __finish( self, result ) :
if result == 0 :
self.__initiateResultDisplay()
else :
self.__initiateErrorDisplay( result )
def __initiateErrorDisplay( self, exceptionInfo ) :
self.__progressIconFrame.setChild( GafferUI.Image( "failure.png" ) )
self.__progressLabel.setText( "<h3>Failed</h3>" )
self.__messageCollapsible.setCollapsed( False )
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Debug,
"Python Traceback",
"".join( traceback.format_exception( *exceptionInfo ) )
)
		# use str( excValue ) rather than the deprecated `message` attribute, which not all exceptions provide
		excType, excValue, excTrace = exceptionInfo
		if excValue and str( excValue ):
			userFriendlyException = str( excValue ).strip( "\n" ).split( "\n" )[-1]
else:
userFriendlyException = str( excType.__name__ )
userFriendlyException += "\nSee DEBUG messages for more information."
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Error,
"Problem Dispatching {nodes}".format( nodes = str( [ task.relativeName( self.__script ) for task in self.__tasks ] ) ),
userFriendlyException,
)
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButton._qtWidget().setFocus()
self.__primaryButton.setText( "Quit" )
self.__primaryButton.setEnabled( True )
self.__primaryButton.setVisible( True )
self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
def __initiateResultDisplay( self ) :
# Although we computed a result successfully, there may still be minor problems
# indicated by messages emitted - check for those.
problems = []
for level in ( IECore.Msg.Level.Error, IECore.Msg.Level.Warning ) :
count = self.__messageWidget.messageCount( level )
if count :
problems.append( "%d %s%s" % ( count, IECore.Msg.levelAsString( level ).capitalize(), "s" if count > 1 else "" ) )
if not problems and self.__postDispatchBehaviour == self.PostDispatchBehaviour.Close :
self.close()
return
self.__progressIconFrame.setChild(
GafferUI.Image( "successWarning.png" if problems else "success.png" )
)
completionMessage = "Completed"
if problems :
completionMessage += " with " + " and ".join( problems )
self.__messageCollapsible.setCollapsed( False )
self.__progressLabel.setText( "<h3>" + completionMessage + "</h3>" )
self.__messageCollapsible.setVisible( self.__messageWidget.messageCount() )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__primaryButton.setText( "Ok" )
self.__primaryButton.setEnabled( True )
self.__primaryButton.setVisible( True )
self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__primaryButton._qtWidget().setFocus()
def __close( self, *unused ) :
self.close()
def __messageCollapsibleChanged( self, collapsible ) :
if not collapsible.getCollapsed() :
# make the window bigger to better fit the messages, but don't make
# it any smaller than it currently is.
self.resizeToFitChild( shrink = False )
# remove our connection - we only want to resize the first time we
# show the messages. after this we assume that if the window is smaller
# it is because the user has made it so, and wishes it to remain so.
self.__messageCollapsibleConneciton = None
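# Typical use (sketch - assumes `script` is a loaded ScriptNode containing a task node `script["myTask"]`):
#
#   dialogue = DispatchDialogue.createWithDefaultDispatchers(
#       tasks = [ script["myTask"] ], nodesToShow = [ script["myTask"] ]
#   )
#   dialogue.setVisible( True )   # dispatching happens when the user presses the "Dispatch" button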
|
|
from PySide import QtSql, QtCore, QtGui
from openpyxl import Workbook, load_workbook
import datetime
import Util
import shutil
import os
tableMember = None
tableGear = None
tableTransaction = None
tableArchiveTrans = None
tableAdmin = None
tableFinancialTran = None
tableGearMaintenance = None
fullFirstLast = None
gearNameIDList = None
class QueryError(Exception):
def __init__(self, query, loc):
self.query = query
self.loc = loc
def __str__(self):
errorMessage = '''\n
Error : {e}
{t}
Location: {l}
Query : {q}
'''.format(e=self.query.lastError(),
t=self.query.lastError().type(),
q=self.query.lastQuery(),
l=self.loc)
return errorMessage
class DictList(list):
def __init__(self, item_list):
self._list = []
self._dict = {}
for n in xrange(len(item_list)):
self._dict[item_list[n]] = n
self._list.append(item_list[n])
def __getitem__(self, key):
if isinstance(key, int):
return self._list[key]
elif isinstance(key, (basestring, unicode)):
return self._dict[key]
else:
raise ValueError
def getList(self):
return self._list
def getDict(self):
return self._dict
def nCerts(self):
return len(self._list)
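# DictList gives a sequence both positional and by-name lookup, e.g.:
#   pay = DictList(['Rental', 'Late Fee'])
#   pay[0]        -> 'Rental'
#   pay['Rental'] -> 0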
class QueryTools(object):
def execQuery(self, cmd, functionNameLoc):
query = QtSql.QSqlQuery(cmd)
if query.lastError().type() != QtSql.QSqlError.NoError:
raise QueryError(query, functionNameLoc)
return query
def getQuery(self, findAttr, table, searchAtt=None, search=None, sort=None):
cmd = 'SELECT {0} FROM {1}'.format(findAttr, table)
if isinstance(searchAtt, list) and isinstance(search, list):
if searchAtt and search:
for n in xrange(len(searchAtt)):
if n == 0: cmd += ' WHERE'
if n > 0: cmd += ' AND'
if isinstance(search[n], (basestring, unicode)):
search[n] = '"{0}"'.format(search[n].replace('"', '""'))
cmd += ' {0}={1}'.format(searchAtt[n], search[n])
elif isinstance(searchAtt, (basestring, unicode)) and isinstance(search, (basestring, unicode)):
cmd += ' WHERE {0}="{1}"'.format(searchAtt, search.replace('"', '""'))
if sort:
cmd += ' {0}'.format(sort)
query = self.execQuery(cmd, 'database.py -> getQuery')
return query
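    # For reference, getQuery('ID, Name', 'Gear', searchAtt='Category', search='3', sort='ORDER BY Name ASC')
    # issues: SELECT ID, Name FROM Gear WHERE Category="3" ORDER BY Name ASC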
def setFieldTo(self, table, attr, val):
# If val is a string, append " to beginning and end
if isinstance(val, (basestring, unicode)):
val = '"{0}"'.format(val)
cmd = 'UPDATE {0} SET {1}={2}'.format(table, attr, val)
self.execQuery(cmd, 'database.py -> querySetFieldTo')
    def qLen(self, query):
        # Note: this does not copy the query; last() moves the caller's cursor, so callers
        # reposition afterwards with first()/seek(-1) where needed
        q = query
        # Move to the last record; at() is then the zero-based index of the final row,
        # so at() + 1 is the number of records in the result set
        q.last()
        lenQ = q.at() + 1
        return lenQ
def getSemesterDates(self):
semNames = ['SemFallStart', 'SemFallEnd', 'SemSprStart', 'SemSprEnd', 'SemSumStart', 'SemSumEnd']
semDates = {}
query = self.getQuery(', '.join(semNames), 'Admin')
query.first()
for n, sName in enumerate(semNames):
semDates[sName] = Util.convert_date('DB2Qt', query.value(n))
return semDates
class Table(QueryTools):
def __init__(self, entries):
self.name = entries[0]
self.fields = []
cmd = 'CREATE TABLE IF NOT EXISTS {0} ('.format(self.name)
for n in xrange(1, len(entries)):
if isinstance(entries[n], list):
self.fields.append(entries[n][0])
cmd += '{0} {1}, '.format(entries[n][0], entries[n][1])
cmd += ')'
if not isinstance(entries[-1], list):
cmd = cmd.replace(', )', ' {0})'.format(entries[-1]))
else:
cmd = cmd.replace(', )', ')')
self.execQuery(cmd, 'database.py -> Table -> __init__')
class TableBase(Table):
def __init__(self):
# self.CertDef = DictList(['LSport', 'LTrad', 'LIce', 'KayakRoll'])
self.paymentType = DictList(['Rental', 'Late Fee', 'Damages', 'Refund', 'Other'])
self.memberStatus = DictList(['Regular', 'President', 'Vice President',
'Treasurer', 'Secretary', 'Gear Manager', 'Honorary'])
self.studentStatus = DictList(['Undergrad', 'Graduate', 'Alumni', 'Non-Student'])
self.gear_category = DictList([str(n) for n in range(1, 9)])
def table_entry(self, table, *args):
if table and args:
attr = []
search = []
for n in xrange(0, len(args) / 2):
attr.append(args[n])
search.append(args[len(args) / 2 + n])
# If an empty field is found, there will be no member match. return None
if isinstance(search, (list, tuple)):
for entry in search:
if not entry:
return None
# Replace " with ""
for n in xrange(len(search)):
if isinstance(search[n], (basestring, unicode)):
search[n] = search[n].replace('"', '""')
cmd = 'SELECT * FROM {0} WHERE {1}="{2}"'.format(table.name, attr[0], search[0])
for i in xrange(1, len(attr)):
cmd += ' AND {0}="{1}"'.format(attr[i], search[i])
query = self.execQuery(cmd, 'database.py -> TableBase -> tableEntry')
if self.qLen(query) == 1:
query.first()
for field in table.fields:
fInd = query.record().indexOf(field)
self.__dict__[field] = query.value(fInd)
if 'DATETIME' in field.upper():
self.__dict__[field] = QtCore.QDateTime.fromString(self.__dict__[field], Util.DB_DateTime)
elif 'DATE' in field.upper() or 'BIRTHDAY' in field.upper():
self.__dict__[field] = Util.convert_date('DB2Qt', self.__dict__[field])
return True
return False
@staticmethod
def split_name(name):
first_name, last_name = None, None
if ',' in name:
name = "{0} {1}".format(name.split(',')[1].strip(), name.split(',')[0].strip())
if name and fullFirstLast:
try:
ind = [x.upper() for x in fullFirstLast[0]].index(name.upper())
first_name = fullFirstLast[1][ind]
last_name = fullFirstLast[2][ind]
except ValueError:
pass
return first_name, last_name
class Member(TableBase):
def __init__(self, *args):
super(Member, self).__init__()
self._isDefined = False
# If args has empty entries, no search was provided, return None
if not args:
return
if len(args) == 1:
self._isDefined = self.table_entry(tableMember, 'ID', args[0])
elif len(args) == 2 or len(args) == 3:
if len(args) == 2:
if isinstance(args[0], QtGui.QLineEdit):
names = args[0].text()
elif isinstance(args[0], QtGui.QComboBox):
names = args[0].currentText()
if isinstance(args[1], QtGui.QLineEdit):
birth_day = args[1].dateDB()
elif isinstance(args[1], QtGui.QComboBox):
birth_day = Util.convert_date('Disp2DB', args[1].currentText())
first_name, last_name = self.split_name(names)
elif len(args) == 3:
first_name, last_name = args[0], args[1]
birth_day = args[2]
if first_name and last_name and birth_day:
self._isDefined = self.table_entry(tableMember,
'FirstName', 'LastName', 'Birthday',
first_name, last_name, birth_day)
def __nonzero__(self):
return self._isDefined
def eligibleToCheckOut(self):
return self.formsCurrent() and (self.campusLink() or self.campus_link_waived())
def formsCurrent(self):
return self.FormsDate >= self.getSemesterDates()['SemFallStart']
def campusLink(self):
return self.CampusLinkDate <= QtCore.QDate.currentDate()
def campus_link_waived(self):
return self.StudStat == self.studentStatus[2] or self.StudStat == self.studentStatus[3]
def has_active_transactions(self):
cmd = 'SELECT GID FROM Transactions WHERE MID={0}'.format(self.ID)
query = self.execQuery(cmd, 'database.py -> Member -> hasActiveTransactions')
return self.qLen(query) >= 1
def hasReqCerts(self, gear):
# Loop through all defined certifications
for cert in Util.certifications:
if self.__dict__[cert + 'Cert'] < gear.__dict__[cert + 'Cert']:
return False
return True
def is_currently_paid(self):
query = self.getQuery('Date', 'FinancialTrans', searchAtt=['MID', 'Type'], search=[self.ID, 'Rental'], sort='ORDER BY Date DESC')
if query and self.qLen(query) >= 1:
query.first()
paid_date = Util.convert_date('DB2Qt', query.value(0))
semester_dates = self.getSemesterDates()
# Find out which semester is the current semester
today = QtCore.QDate.currentDate()
# Include the break following the current semester
if semester_dates['SemFallStart'] <= today < semester_dates['SemSprStart']:
if semester_dates['SemFallStart'] <= paid_date < semester_dates['SemSprStart']:
return True
if semester_dates['SemSprStart'] <= today < semester_dates['SemSumStart']:
if semester_dates['SemSprStart'] <= paid_date < semester_dates['SemSumStart']:
return True
if semester_dates['SemSumStart'] <= today < semester_dates['SemSumEnd']:
if semester_dates['SemSumStart'] <= paid_date < semester_dates['SemSumEnd']:
return True
return False
def full_name(self):
return '{0} {1}'.format(self.FirstName, self.LastName)
def nameBDay(self):
return '{0}, {1}'.format(self.full_name(), Util.convert_date('Qt2Disp', self.Birthday))
class Gear(TableBase):
def __init__(self, *args):
super(Gear, self).__init__()
self._isDefined = False
# Get the ID from *args
for arg in args:
if isinstance(arg, QtGui.QComboBox):
arg = arg.currentText()
elif isinstance(arg, QtGui.QLineEdit):
arg = arg.text()
try:
gearID = [s for s in gearNameIDList[1] if arg.upper() == s.upper()]
except:
gearID = args
if gearID:
global tableGear
self._isDefined = self.table_entry(tableGear, 'ID', arg)
def __nonzero__(self):
return self._isDefined
def is_checkoutable(self):
return not self.Unrentable and self.numAvailable() >= 1
def numInInventory(self):
query = self.getQuery('Quantity', 'Gear', searchAtt='ID', search=self.ID)
query.first()
numInv = query.record().value(0)
return numInv
def numCheckedOut(self):
MID_List = self.whoHasMe()
return len(MID_List)
def numAvailable(self):
return self.numInInventory() - self.numCheckedOut()
def whoHasMe(self):
query = self.getQuery('MID', 'Transactions', searchAtt='GID', search=self.ID)
MID_List = []
while query.next():
MID_List.append(query.value(0))
return MID_List
class Trans(TableBase, list):
def __init__(self, member=None, gear=None, transaction_id=None):
super(Trans, self).__init__()
if not member and not gear and transaction_id:
self.appendTrans(transaction_id)
elif (member or gear) and not transaction_id:
cmd = 'SELECT TID FROM {0} WHERE '.format(tableTransaction.name)
if member:
cmd += ' {}={}'.format('MID', member.ID)
if gear:
if member:
cmd += ' AND'
cmd += ' {}="{}"'.format('GID', gear.ID)
cmd += ' ORDER BY DueDate DESC'
query = self.execQuery(cmd, 'database.py -> getTrans')
while query.next():
transaction_id = query.value(0)
self.appendTrans(transaction_id)
def __nonzero__(self):
return len(self) >= 1
def appendTrans(self, TID):
trans = TableBase()
trans.table_entry(tableTransaction, 'TID', TID)
self.append(trans)
def hasMID(self, member_id):
for trans in self:
if trans.MID == member_id:
return True
return False
def hasGID(self, gear_id):
for trans in self:
if trans.GID == gear_id:
return True
return False
class Database(TableBase):
def __init__(self, parent, name_db=None):
super(Database, self).__init__()
self.parent = parent
self.getMember = Member
self.getGear = Gear
self.getTrans = Trans
if name_db:
self.nameDB = name_db
self.SQLDB = QtSql.QSqlDatabase.addDatabase('QSQLITE')
self.SQLDB.setDatabaseName(self.nameDB)
self.SQLDB.open()
# Check to see that the database was opened
if not self.SQLDB.isOpen():
print 'ERROR opening the database!'
            raise RuntimeError('Could not open the database: {0}'.format(self.SQLDB.lastError().text()))
def certDef(certification):
return [certification + 'Cert', 'INTEGER DEFAULT 0']
def mkCertFields(certification):
return [certDef(certification),
[certification + 'CertDate', 'TEXT'],
[certification + 'CertVouch', 'TEXT']]
self.tableMemberDef = ['Member',
['LastName' , 'TEXT'],
['FirstName' , 'TEXT'],
['ID' , 'INTEGER PRIMARY KEY AUTOINCREMENT'],
['StudentID' , 'TEXT UNIQUE DEFAULT NULL'],
['Email' , 'TEXT'],
['Phone' , 'TEXT'],
['Birthday' , 'TEXT'],
['FormsDate' , 'TEXT'],
['CampusLinkDate', 'TEXT'], ['MembStat', 'TEXT'], ['StudStat', 'TEXT'],
['Street' , 'TEXT'], ['City' , 'TEXT'], ['State' , 'TEXT'], ['Zip' , 'TEXT'],
['EmName' , 'TEXT'], ['EmRel' , 'TEXT'], ['EmPhoneH', 'TEXT'], ['EmPhoneW', 'TEXT'], ['EmPhoneC', 'TEXT'] ]
for cert in Util.certifications:
self.tableMemberDef += mkCertFields(cert)
self.tableMemberDef += [
['RoommateName', 'TEXT'], ['RoommatePhone', 'TEXT'],
['InsurName' , 'TEXT'], ['InsurPol' , 'TEXT'], ['InsurGrp', 'TEXT'],
['Med' , 'TEXT'], ['Note' , 'TEXT'],
['LastUpdated' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
', CONSTRAINT unq UNIQUE (FirstName, LastName, Birthday)']
self.tableGearDef = ['Gear',
['Name' , 'TEXT COLLATE NOCASE'],
['ID' , 'TEXT COLLATE NOCASE'],
['Quantity' , 'INTEGER DEFAULT 1' ],
['Price' , 'REAL DEFAULT 0.00' ],
['Category' , 'INTEGER DEFAULT 0' ],
['Weight' , 'REAL DEFAULT 0.0' ],
['PurchaseDate' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
['ExpirationDate' , "TEXT DEFAULT ''" ],
['Manufacturer' , 'TEXT COLLATE NOCASE'],
['Unrentable' , 'INTEGER DEFAULT 0' ],
['UnrentableReason', "TEXT DEFAULT ''" ]]
for cert in Util.certifications:
self.tableGearDef += [certDef(cert)]
self.tableGearDef += [
['CareMaintenance', "TEXT DEFAULT ''"],
['Misc' , "TEXT DEFAULT ''"],
['LastUpdated' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
', UNIQUE (ID)']
self.tableRetiredGearDef = ['RetiredGear',
['Name' , 'TEXT COLLATE NOCASE'],
['ID' , 'TEXT COLLATE NOCASE'],
['Quantity' , 'INTEGER DEFAULT 1' ],
['Price' , 'REAL DEFAULT 0.00' ],
['Category' , 'INTEGER DEFAULT 0' ],
['Weight' , 'REAL DEFAULT 0.0' ],
['PurchaseDate' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
['ExpirationDate', "TEXT DEFAULT ''" ],
['Manufacturer' , 'TEXT COLLATE NOCASE']]
for cert in Util.certifications:
self.tableRetiredGearDef += [certDef(cert)]
self.tableRetiredGearDef += [
['CareMaintenance', "TEXT DEFAULT ''"],
['Misc' , "TEXT DEFAULT ''"],
['RetiredDate' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"] ]
self.tableTransactionDef = ['Transactions',
['TID' , 'INTEGER PRIMARY KEY'],
['MID' , 'INTEGER' ],
['GID' , 'TEXT' ],
['CheckOutDateTime', 'TEXT' ],
['DueDate' , 'TEXT' ],
', FOREIGN KEY(MID) REFERENCES Member(ID), FOREIGN KEY(GID) REFERENCES Gear(ID)']
self.tableArchiveTranDef = ['ArchiveTrans',
['MID_OUT' , 'INTEGER'],
['MID_IN' , 'INTEGER'],
['GID' , 'TEXT' ],
['CheckOutDateTime', 'TEXT' ],
['DueDate' , 'TEXT' ],
['CheckInDateTime' , 'TEXT' ],
', FOREIGN KEY(MID_OUT) REFERENCES Member(ID), FOREIGN KEY(MID_IN) REFERENCES Member(ID), FOREIGN KEY(GID) REFERENCES Gear(ID)']
self.tableFinancialTranDef = ['FinancialTrans',
['MID' , 'INTEGER'],
['Date' , 'TEXT' ],
['Type' , 'TEXT' ],
['Amount' , 'REAL' ],
['Comment', 'TEXT' ],
', FOREIGN KEY(MID) REFERENCES Member(ID)']
self.tableGearMaintenanceDef = ['Maintenance',
['GID' , 'TEXT'],
['Date', 'TEXT'],
['Text', 'TEXT'],
',FOREIGN KEY(GID) REFERENCES Gear(ID)']
self.tableSettingsDef = ['Admin',
['DayOfMeetings', 'INTEGER DEFAULT 1'],
['MeetingFreq' , 'INTEGER DEFAULT 1'],
['RentalFee' , 'REAL DEFAULT 0.0' ],
['SemFallStart' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
['SemFallEnd' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
['SemSprStart' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
['SemSprEnd' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
['SemSumStart' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"],
['SemSumEnd' , "TEXT DEFAULT (date(CURRENT_TIMESTAMP, 'localtime'))"]]
self.tableDefs = {self.tableMemberDef[0]: self.tableMemberDef,
self.tableGearDef[0]: self.tableGearDef,
self.tableTransactionDef[0]: self.tableTransactionDef,
self.tableArchiveTranDef[0]: self.tableArchiveTranDef,
self.tableSettingsDef[0]: self.tableSettingsDef,
self.tableFinancialTranDef[0]: self.tableFinancialTranDef,
self.tableGearMaintenanceDef[0]: self.tableGearMaintenanceDef,
self.tableRetiredGearDef[0]: self.tableRetiredGearDef}
# self.execQuery("DROP TABLE IF EXISTS Member" , 'database.py -> initDB, Member' ); print 'Member dropped'
# self.execQuery("DROP TABLE IF EXISTS Gear" , 'database.py -> initDB, Gear' ); print 'Gear dropped'
# self.execQuery("DROP TABLE IF EXISTS Transactions" , 'database.py -> initDB, Transactions' ); print 'Transactions dropped'
# self.execQuery("DROP TABLE IF EXISTS ArchiveTrans" , 'database.py -> initDB, ArchiveTrans' ); print 'ArchiveTrans dropped'
# self.execQuery("DROP TABLE IF EXISTS Admin" , 'database.py -> initDB, Admin' ); print 'Admin dropped'
# self.execQuery("DROP TABLE IF EXISTS FinancialTrans", 'database.py -> initDB, FinancialTrans'); print 'FinancialTrans dropped'
# self.execQuery("DROP TABLE IF EXISTS Maintenance" , 'database.py -> initDB, Maintenance' ); print 'Maintenance dropped'
# self.execQuery("ALTER TABLE Member ADD StudStat INTEGER DEFAULT 0", 'database.py -> initDB, ALTER TABLE')
# Set settings
self.execQuery('PRAGMA synchronous = OFF' , 'database.py -> Database -> __init__')
self.execQuery('PRAGMA journal_mode = MEMORY' , 'database.py -> Database -> __init__')
# self.execQuery('PRAGMA locking_mode = EXCLUSIVE', 'database.py -> Database -> __init__')
self.execQuery('PRAGMA auto_vacuum = FULL' , 'database.py -> Database -> __init__')
self.execQuery('PRAGMA page_size = 65536' , 'database.py -> Database -> __init__')
global tableMember ; tableMember = Table(self.tableMemberDef)
global tableGear ; tableGear = Table(self.tableGearDef)
global tableTransaction ; tableTransaction = Table(self.tableTransactionDef)
global tableArchiveTrans ; tableArchiveTrans = Table(self.tableArchiveTranDef)
global tableAdmin ; tableAdmin = Table(self.tableSettingsDef)
global tableFinancialTran ; tableFinancialTran = Table(self.tableFinancialTranDef)
global tableGearMaintenance; tableGearMaintenance = Table(self.tableGearMaintenanceDef)
global tableRetiredGear ; tableRetiredGear = Table(self.tableRetiredGearDef)
# Set settings
self.execQuery('PRAGMA synchronous = OFF' , 'database.py -> Database -> __init__')
self.execQuery('PRAGMA journal_mode = MEMORY' , 'database.py -> Database -> __init__')
# self.execQuery('PRAGMA locking_mode = EXCLUSIVE', 'database.py -> Database -> __init__')
self.execQuery('PRAGMA auto_vacuum = FULL' , 'database.py -> Database -> __init__')
self.execQuery('PRAGMA page_size = 65536' , 'database.py -> Database -> __init__')
        # Check to see if the admin table is empty; if so, initialize it with the default values
query = self.getQuery('*', 'Admin')
if self.qLen(query) <= 0:
self.execQuery("INSERT INTO Admin (DayOfMeetings) VALUES (1)", 'database.py -> initDB, Admin')
elif self.qLen(query) > 1:
print 'ERROR: Admin table has more than one entry'
raise ValueError
def addItem(self, table_name, attrListIn):
attr_list = []
val_list = []
table_def = self.tableDefs[table_name]
for entry in table_def:
if isinstance(entry, list) and len(entry) == 2:
attr = entry[0]
if attr in attrListIn.keys():
val = attrListIn[attr]
# Make sure that if StudentID is an empty string, it gets set to NULL
if attr == 'StudentID' and (not val or val.upper() == 'NULL'):
val = 'NULL'
if 'TEXT' in entry[1].upper() and not (attr == 'StudentID' and val == 'NULL'):
val = '"{0}"'.format(val.replace('"', '""'))
                    elif 'INTEGER' in entry[1].upper() or 'REAL' in entry[1].upper():
# Convert from bool to int
if isinstance(val, bool):
val = int(val)
val = str(val)
attr_list.append(attr)
val_list.append(val)
cmd = 'INSERT INTO {}({}) VALUES({})'.format(table_name, ', '.join(attr_list), ', '.join(val_list))
self.execQuery(cmd, 'database.py -> addItem')
return True
def updateItem(self, table_name, attr_list, item):
cmd = []
tableDef = self.tableDefs[table_name]
# Search for the type of the current attribute
for entry in tableDef:
if isinstance(entry, list) and len(entry) == 2:
attr = entry[0]
if attr in attr_list.keys():
val = attr_list[attr]
# Make sure that if StudentID is an empty string, it gets set to NULL
if attr == 'StudentID' and (val is None or val == '' or val.upper() == 'NULL'):
val = 'NULL'
# Add quotes around strings
if 'TEXT' in entry[1].upper() and not (attr == 'StudentID' and val == 'NULL'):
val = '"{0}"'.format(val.replace('"', '""'))
# Convert from bool to int
if isinstance(val, bool):
val = int(val)
cmd.append('{}={}'.format(attr, val))
item_id = item.ID
if table_name == 'Gear':
item_id = '"{}"'.format(item_id)
cmd = 'UPDATE {} SET {} WHERE ID={}'.format(table_name, ', '.join(cmd), item_id)
self.execQuery(cmd, 'database.py -> updateItem')
return True
def delItem(self, table_name, item):
id = item.ID
if table_name == 'Gear':
id = '"{}"'.format(id)
self.archiveGear(item)
cmd = 'DELETE FROM {} WHERE ID={}'.format(table_name, id)
self.execQuery(cmd, 'database.py -> delItem')
def wTransaction(self, member, gear, due_date):
today_now = QtCore.QDateTime.currentDateTime().toString(Util.DB_DateTime)
cmd = 'INSERT INTO Transactions(MID, GID, CheckOutDateTime, DueDate) VALUES({}, "{}", "{}", "{}")'.format(
member.ID, gear.ID, today_now, due_date.toString(Util.DB_Date))
self.execQuery(cmd, 'database.py -> wTransaction')
def returnItem(self, trans, nTrans=1, retmemb=None):
for n in xrange(nTrans):
self.archiveTransaction(trans[n], retmemb)
cmd = 'DELETE FROM {} WHERE TID={}'.format(tableTransaction.name, trans[n].TID)
self.execQuery(cmd, 'database.py -> delTrans')
def archiveTransaction(self, trans, retmemb=None):
# Only archive if the item has been checked out for more than 1 hour
today_now = QtCore.QDateTime.currentDateTime()
checkOutDateTime = trans.CheckOutDateTime
if checkOutDateTime.secsTo(today_now) >= 3600:
today_now = today_now.toString(Util.DB_DateTime)
if retmemb:
return_id = retmemb.ID
else:
return_id = trans.MID
cmd = ('INSERT INTO ArchiveTrans(MID_OUT, MID_IN, GID, CheckOutDateTime, DueDate, CheckInDateTime) '
'SELECT T.MID, {0} as MID_IN, T.GID, T.CheckOutDateTime, T.DueDate, "{1}" as CheckInDateTime '
'FROM Transactions T, Member M WHERE T.TID={2} AND T.MID=M.ID').format(return_id, today_now, trans.TID)
self.execQuery(cmd, 'database.py -> archiveTransaction')
def archiveGear(self, gear):
        # Only archive if the item has been in the inventory for more than a day
today_now = QtCore.QDate.currentDate()
check_out_date = gear.PurchaseDate
if check_out_date.daysTo(today_now) >= 1:
# today_now = today_now.toString(Util.DB_DateTime)
certs = ''
if Util.certifications:
certs = ['{}Cert'.format(c) for c in Util.certifications]
certs = ', ' + ', '.join(certs)
cmd = ('INSERT INTO RetiredGear(Name, ID, Quantity, Price, Category, Weight, PurchaseDate, '
'ExpirationDate, Manufacturer{Cert}, CareMaintenance, Misc) '
'SELECT Name, ID, Quantity, Price, Category, Weight, PurchaseDate, '
'ExpirationDate, Manufacturer{Cert}, CareMaintenance, Misc '
'FROM Gear WHERE ID="{GID}"').format(Cert=certs, GID=gear.ID)
self.execQuery(cmd, 'database.py -> archiveGear')
def fillMember(self, name, memBDay, updCurFields=True):
if updCurFields:
self.parent.currentMemberNameID = name
if name:
first_name, last_name = self.split_name(name)
if first_name and last_name:
bDayList = self.getBDayList(first_name, last_name)
oldBDayList = memBDay.get_menu()
for n in xrange(len(oldBDayList)):
oldBDayList[n] = Util.convert_date('Disp2Qt', oldBDayList[n])
if bDayList != oldBDayList:
if len(bDayList) > 1:
bDayList = [Util.selectOne] + bDayList
memBDay.setMenu(bDayList)
return
memBDay.clear()
self.parent.currentBDayBoxIndex = 0
def fill_gear(self, gearSearch, gearBox):
gNameID = gearSearch.text()
self.parent.currentGearLineEdit = gNameID
if gNameID:
query = None
gear_name = [s for s in gearNameIDList[0] if gNameID.upper() == s.upper()]
gear_id = [s for s in gearNameIDList[1] if gNameID.upper() == s.upper()]
# Is gNameID in the Name column?
if gear_name:
query = self.getQuery('ID', 'Gear', 'Name', gNameID)
# Is gNameID in the ID column?
elif gear_id:
query = self.getQuery('Name', 'Gear', 'ID', gNameID)
if query and self.qLen(query) >= 1:
item_list = self.query2list(query)
old_item_list = gearBox.get_menu()
if item_list != old_item_list:
if len(item_list) > 1:
item_list = [Util.selectOne] + item_list
gearBox.setMenu(item_list)
return
gearBox.clear()
self.parent.currentGearComboBox = 0
def isStudentIDAvailable(self, name, birthday, student_id):
student_id = student_id.strip()
# No ID is also unique
if not student_id:
return True
query = self.getQuery('FirstName, LastName, Birthday', 'Member', 'StudentID', student_id)
if self.qLen(query) == -1:
return True
elif self.qLen(query) == 1:
first_name, last_name = self.split_name(name)
query.first()
query_bDay = Util.convert_date('DB2Qt', query.record().value(2))
if first_name.upper() == query.record().value(0).upper() and last_name.upper() == query.record().value(1).upper() and birthday == query_bDay:
return True
return False
def isGearIDUnique(self, gear_id):
gear_id = gear_id.strip()
query = self.getQuery('ID', 'Gear', 'ID', gear_id)
return self.qLen(query) == -1
def setDayOfMeetings(self, dayOfMeetings):
self.setFieldTo('Admin', 'DayOfMeetings', dayOfMeetings)
def setDueDate(self, due_date):
self.setFieldTo('Admin', 'DueDate', Util.convert_date('Qt2DB', due_date))
def get_day_of_meetings(self):
cmd = 'SELECT DayOfMeetings FROM Admin'
query = self.execQuery(cmd, 'database.py -> getDayOfMeetings')
# A day was found in the table
if self.qLen(query) == 1:
query.first()
return query.record().value(0)
def getDefaultDueDate(self):
day_of_meeting = self.get_day_of_meetings()
days_until_due = day_of_meeting - QtCore.QDate.currentDate().dayOfWeek()
if days_until_due <= 0:
days_until_due += 7
next_meeting_day = QtCore.QDate.currentDate().addDays(days_until_due)
# If the next meeting day falls on a break, move it until after the break
semester_dates = self.getSemesterDates()
# While the next meeting is occurring between semesters,
# push the date back 7 days
if semester_dates['SemFallStart'].addDays(7) < semester_dates['SemFallEnd'] and \
semester_dates['SemSprStart'].addDays(7) < semester_dates['SemSprEnd'] and \
semester_dates['SemSumStart'].addDays(7) < semester_dates['SemSumEnd']:
while next_meeting_day < semester_dates['SemFallStart'] or \
semester_dates['SemFallEnd'] < next_meeting_day < semester_dates['SemSprStart'] or \
semester_dates['SemSprEnd'] < next_meeting_day < semester_dates['SemSumStart']:
next_meeting_day = next_meeting_day.addDays(7)
every_n_weeks = self.get_meeting_frequency()
next_meeting_day = next_meeting_day.addDays(7 * (every_n_weeks - 1))
return next_meeting_day
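    # Worked example of the logic above: meetings on Tuesday (DayOfMeetings=2; QDate uses Monday=1..Sunday=7)
    # and today Friday (5) give days_until_due = 2-5 = -3 -> +7 = 4, so the due date is next Tuesday,
    # pushed past any between-semester break and then forward by (MeetingFreq-1) extra weeks.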
def get_meeting_frequency(self):
return self.getAttrFromTable('Admin', 'MeetingFreq')
def setMeetingFreq(self, meeting_frequency):
self.setFieldTo('Admin', 'MeetingFreq', meeting_frequency)
def getGearIDTransTable(self, row, member):
cmd = 'SELECT T.GID FROM {} M, {} G, {} T WHERE M.ID={} AND M.ID=T.MID AND T.GID=G.ID ORDER BY T.DueDate ASC'.format(tableMember.name, tableGear.name, tableTransaction.name, member.ID)
query = self.execQuery(cmd, 'database.py -> getGearTransTable')
query.seek(row)
return query.value(0)
# return query.value(query.record().indexOf('T.GID'))
def getAttrFromTable(self, table, attr, row=0, order=None):
cmd = 'SELECT {} FROM {}'.format(attr, table)
if order:
cmd += " " + order
query = self.execQuery(cmd, 'database.py -> getAttrFromTable')
query.seek(row)
return query.value(0)
def getMID_GID_row_TransTable(self, row):
cmd = 'SELECT GID, MID, TID FROM {}'.format(tableTransaction.name)
query = self.execQuery(cmd, 'database.py -> getMID_GIDTransTable')
query.seek(row)
MID = query.value(query.record().indexOf('MID'))
GID = query.value(query.record().indexOf('GID'))
cmd = 'SELECT GID FROM {} WHERE MID={} ORDER BY TID ASC'.format(tableTransaction.name, MID)
query = self.execQuery(cmd, 'database.py -> getMID_GID_row_TID_TransTable')
while query.next():
if query.value(0) == GID:
row = query.at()
break
return MID, GID, row
def getUniqueID(self, table, match):
match = match.replace('*', '%')
cmd = 'SELECT ID FROM {} WHERE ID LIKE "{}" ORDER BY ID ASC'.format(table, match)
query = self.execQuery(cmd, 'database.py -> getUniqueID')
if self.qLen(query) < 0:
return match.replace('%', '1')
else:
dbID = []
prefix, suffix = match.split('%')
query.seek(-1)
while query.next():
ID = query.value(0)
lenID = len(ID)
                # Generate a list of the number portions of the gear IDs that match the search pattern.
                # Slice off the prefix/suffix substrings (lstrip/rstrip would strip character sets instead).
                ID = ID[len(prefix):len(ID) - len(suffix)] if suffix else ID[len(prefix):]
if Util.is_number(ID):
dbID.append(int(ID))
# Iterate through the list of numbers. If a gap is found in the number list, use that
# missing number for the gearID. If no gap is found, use the max+1 number for the gearID.
newIDnum = None
for n in xrange(1, len(dbID)):
if dbID[n] > dbID[n - 1] + 1:
newIDnum = dbID[n - 1] + 1
break
if not newIDnum:
newIDnum = dbID[-1] + 1
nZ = lenID - len(prefix) - len(suffix)
return '{}{}{}'.format(prefix, str(newIDnum).zfill(nZ), suffix)
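# Example of the gap-filling scheme above (hypothetical IDs, not from any real
# database): with existing IDs 'G001', 'G002', 'G004' and match 'G*', dbID
# becomes [1, 2, 4]; the first gap gives newIDnum = 3, nZ = 3, and the returned
# ID is 'G003'. When the list has no gaps, max + 1 is used instead.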
def getBDayList(self, fName, lName):
if fName and lName:
query = self.getQuery('Birthday', 'Member', ['FirstName', 'LastName'], [fName, lName])
if query and self.qLen(query) >= 1:
bDayList = []
query.seek(-1)
while query.next():
bDayList.append(Util.convert_date('DB2Qt', query.value(0)))
if bDayList:
return bDayList
def set_rental_price(self, price):
self.setFieldTo('Admin', 'RentalFee', price)
def getRentalPrice(self):
query = self.getQuery('RentalFee', 'Admin')
query.first()
return query.value(0)
def submitPayment(self, payTypeBox, amountBox, payDateEdit, noteEdit):
attr_list = {'MID': self.parent.currentMember.ID,
'Date': Util.convert_date('Qt2DB', payDateEdit.date()),
'Type': payTypeBox.currentText(),
'Amount': amountBox.value(),
'Comment': noteEdit.document().toPlainText().strip()}
if attr_list['Type'] == 'Refund':
attr_list['Amount'] = -attr_list['Amount']
self.addItem('FinancialTrans', attr_list)
def autoCompleter(self, findAttr, table, searchAtt=None, search=None, sort=None):
item_list = {}
attrs = findAttr.split(',')
for n in xrange(len(attrs)):
attrs[n] = attrs[n].strip()
query = self.getQuery(attrs[n], table, searchAtt, search, sort)
item_list[attrs[n]] = self.query2list(query, sort=False, ignore_duplicates=False)
# Combine the names
compList = []
if 'FirstName' in findAttr and 'LastName' in findAttr and table == 'Member':
first_name = item_list[attrs[0]]
last_name = item_list[attrs[1]]
for n in xrange(len(first_name)):
compList.append('{0} {1}'.format(first_name[n], last_name[n]))
for n in xrange(len(last_name)):
compList.append('{0}, {1}'.format(last_name[n], first_name[n]))
global fullFirstLast
fullFirstLast = [compList, first_name + first_name, last_name + last_name]
elif 'Name' in findAttr and 'ID' in findAttr and table == 'Gear':
gear_name = Util.remove_duplicates(item_list[attrs[0]])
gear_id = item_list[attrs[1]]
compList = gear_name + gear_id
global gearNameIDList
gearNameIDList = [gear_name, gear_id]
elif isinstance(findAttr, (basestring, unicode)):
query = self.getQuery(findAttr, table, searchAtt, search, sort)
compList = self.query2list(query)
# Sort the list case insensitively
compList = Util.sort_list(compList)
completer = QtGui.QCompleter(compList)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
return completer
def query2list(self, query, sort=True, ignore_duplicates=True):
item_list = []
for n in xrange(query.record().count()):
query.seek(-1)
while query.next():
val = query.value(n)
if val:
if isinstance(val, unicode):
val = str(val).strip()
item_list.append(val)
# Sort the list case insensitively
if len(item_list) > 1:
# Remove duplicates
if ignore_duplicates:
item_list = list(set(item_list))
if sort:
if isinstance(item_list[0], (basestring, unicode)):
item_list = sorted(item_list, key=lambda s: s.lower())
else:
print 'No sort available for this item type: database.py -> query2list'
return item_list
def getShitListQuery(self):
cmd = ('SELECT M.FirstName, M.LastName, G.Name, T.GID, '
'strftime("%m/%d/%Y %H:%M:%S", T.CheckOutDateTime), strftime("%m/%d/%Y", T.DueDate) '
'FROM Transactions T JOIN Member M ON T.MID=M.ID JOIN Gear G ON T.GID=G.ID '
'WHERE (JULIANDAY("NOW")-JULIANDAY(T.DueDate)) > 0 ORDER BY T.CheckOutDateTime DESC')
query = self.execQuery(cmd, 'database.py -> getShitListQuery')
return query
def exportDatabase(self, file_name):
wb = Workbook(guess_types=True)
for sheet in wb:
wb.remove_sheet(sheet)
wb = self._make_sheet(tableDef=self.tableMemberDef , title='Members' , order='LastName' , wb=wb)
wb = self._make_sheet(tableDef=self.tableGearDef , title='Gear Inventory' , order='ID' , wb=wb)
wb = self._make_sheet(tableDef=self.tableTransactionDef , title='Active Transactions' , order='DueDate' , wb=wb)
wb = self._make_sheet(tableDef=self.tableFinancialTranDef , title='Financial Transactions', order='Date' , wb=wb)
wb = self._make_sheet(tableDef=self.tableArchiveTranDef , title='Archived Transactions' , order='CheckInDateTime', wb=wb)
wb = self._make_sheet(tableDef=self.tableGearMaintenanceDef, title='Gear Maintenance' , order='Date' , wb=wb)
wb = self._make_sheet(tableDef=self.tableSettingsDef , title='Admin Settings' , order='DayOfMeetings' , wb=wb)
wb = self._make_sheet(tableDef=self.tableRetiredGearDef , title='Retired Gear' , order='RetiredDate' , wb=wb)
wb.save(file_name)
def exportShitList(self, file_name):
wb = Workbook(guess_types=True)
for sheet in wb:
wb.remove_sheet(sheet)
query = self.getShitListQuery()
header = ['FirstName', 'LastName', 'Name', 'GID', 'strftime(%m/%d/%Y %H:%M:%S, T.CheckOutDateTime)', 'strftime(%m/%d/%Y, T.DueDate)']
wb = self._make_sheet(header=header, query=query, title='Shit List', wb=wb)
wb.save(file_name)
def importDatabase(self, file_name):
from openpyxl.cell import column_index_from_string
wb = load_workbook(filename=file_name, use_iterators=True)
ws = wb.get_sheet_by_name(name='Members')
# Read and confirm the header
for row in ws.iter_rows():
membAttr = {}
for cell in row:
if cell._value:
col = column_index_from_string(cell.column)
if cell.row > 1:
# Read the remaining members
membAttr[self.impExpMemberHeader[col - 1]] = cell.value
elif cell.row == 1 and cell.value != self.impExpMemberHeader[col - 1]:
self.statBar.showMessage("Error in header in '{}'".format(file_name), Util.messageDelay)
return False
if membAttr:
member = self.getMember(membAttr['FirstName'], membAttr['LastName'], membAttr['Birthday'])
if member:
self.updateItem('Member', membAttr, member)
else:
self.addItem('Member', membAttr)
return True
def _make_sheet(self, **kwarg):
for arg in kwarg.keys():
if arg == 'tableDef':
header = [v[0] for v in kwarg[arg] if isinstance(v, list) and len(v) == 2]
table = kwarg[arg][0]
elif arg == 'title':
title = kwarg[arg]
elif arg == 'order':
order = kwarg[arg]
elif arg == 'wb':
wb = kwarg[arg]
elif arg == 'query':
query = kwarg[arg]
elif arg == 'header':
header = kwarg[arg]
ws = wb.create_sheet()
ws.title = title
# Write the header
nameShift = 0
for n, head in enumerate(header):
c = ws.cell(row=1, column=n + 1 + nameShift)
if head == 'MID' or head == 'MID_IN' or head == 'MID_OUT':
c.value = 'LastName'
if head == 'MID_IN' : c.value = 'Returned ' + c.value
if head == 'MID_OUT': c.value = 'Checked Out ' + c.value
nameShift += 1
c = ws.cell(row=1, column=n + 1 + nameShift)
c.value = 'FirstName'
if head == 'MID_IN' : c.value = 'Returned ' + c.value
if head == 'MID_OUT': c.value = 'Checked Out ' + c.value
elif head == 'GID':
c.value = 'ID'
nameShift += 1
c = ws.cell(row=1, column=n + 1 + nameShift)
c.value = 'GearName'
else:
c.value = head.split('.')[-1].strip(')')
if 'query' not in kwarg.keys():
query = self.getQuery('*', table, sort='ORDER BY ' + order + ' ASC')
# Dump the database
row = 1
while query.next():
nameShift = 0
for n, head in enumerate(header):
dumpVal = query.record().value(query.record().indexOf(head))
c = ws.cell(row=row + 1, column=n + 1 + nameShift)
if head == 'MID' or head == 'MID_IN' or head == 'MID_OUT':
mNameQuery = self.getQuery("LastName, FirstName", 'Member', searchAtt='ID', search=str(dumpVal))
mNameQuery.first()
c.value = mNameQuery.value(0)
nameShift += 1
c = ws.cell(row=row + 1, column=n + 1 + nameShift)
c.value = mNameQuery.value(1)
elif head == 'GID':
c.value = dumpVal
mNameQuery = self.getQuery("Name", 'Gear', searchAtt='ID', search=str(dumpVal))
mNameQuery.first()
nameShift += 1
c = ws.cell(row=row + 1, column=n + 1 + nameShift)
c.value = mNameQuery.value(0)
else:
c.value = dumpVal
if isinstance(c.value, (basestring, unicode)) and c.value.count('-') == 2:
y, m, d = dumpVal.split('-')
if len(y) == 4 and len(m) == 2 and len(d) == 2:
c.value = datetime.datetime(int(y), int(m), int(d))
c.number_format = Util.dateDispFormat
row += 1
return wb
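# Header-expansion sketch for _make_sheet (illustrative header, not a real
# table definition): a header like ['ID', 'MID', 'GID', 'DueDate'] is written
# out as ['ID', 'LastName', 'FirstName', 'ID', 'GearName', 'DueDate'], with
# nameShift tracking the extra columns inserted for each member/gear lookup.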
def close(self):
# Reset the connector and remove it (magic)
self.SQLDB = QtSql.QSqlDatabase()
self.execQuery('VACUUM', 'Database.py -> Database -> close')
self.SQLDB.close()
self.SQLDB.removeDatabase(self.nameDB)
del self.SQLDB
# Backup the database file
backupDir = 'Backup_Databases'
if not os.path.isdir(backupDir):
os.mkdir(backupDir)
shutil.copyfile(self.nameDB, '{}/{}_{}'.format(backupDir,
QtCore.QDate.currentDate().toString('yyMMdd'),
self.nameDB))
|
|
import json
import re
from django.core.serializers.json import DjangoJSONEncoder
from django.shortcuts import render
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import permission_required
from survey.forms.logic import LogicForm
from survey.forms.filters import QuestionFilterForm, MAX_NUMBER_OF_QUESTION_DISPLAYED_PER_PAGE, DEFAULT_NUMBER_OF_QUESTION_DISPLAYED_PER_PAGE
from survey.models import AnswerRule, BatchQuestionOrder
from survey.models.batch import Batch
from survey.models.question import Question, QuestionOption
from survey.forms.question import QuestionForm
from survey.services.export_questions import ExportQuestionsService
from survey.utils.views_helper import prepend_to_keys, clean_query_params
from survey.views.custom_decorators import not_allowed_when_batch_is_open
from collections import OrderedDict
ADD_LOGIC_ON_OPEN_BATCH_ERROR_MESSAGE = "Logics cannot be added while the batch is open."
ADD_SUBQUESTION_ON_OPEN_BATCH_ERROR_MESSAGE = "Subquestions cannot be added while the batch is open."
REMOVE_QUESTION_FROM_OPEN_BATCH_ERROR_MESSAGE = "Question cannot be removed from a batch while the batch is open."
def _get_questions_based_on_filter(batch_id, filter_params):
filter_params = clean_query_params(filter_params)
if batch_id:
filter_params = prepend_to_keys(filter_params, 'question__')
return BatchQuestionOrder.get_batch_order_specific_questions(batch_id, filter_params)
return Question.objects.filter(subquestion=False, **filter_params).exclude(group__name='REGISTRATION GROUP')
def _max_number_of_question_per_page(number_sent_in_request):
max_question_per_page_supplied = number_sent_in_request or 0
given_max_per_page = min(int(max_question_per_page_supplied), MAX_NUMBER_OF_QUESTION_DISPLAYED_PER_PAGE)
return max(given_max_per_page, DEFAULT_NUMBER_OF_QUESTION_DISPLAYED_PER_PAGE)
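# Clamping behaviour of the helper above (a sketch; the actual constants live
# in survey.forms.filters):
#   missing or 0                     -> DEFAULT_NUMBER_OF_QUESTION_DISPLAYED_PER_PAGE
#   above the allowed maximum        -> MAX_NUMBER_OF_QUESTION_DISPLAYED_PER_PAGE
#   between the default and maximum  -> the requested value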
def _questions_given(batch_id, request):
filter_params = {'group__id': request.GET.get('groups', None), 'module__id': request.GET.get('modules', None),
'answer_type': request.GET.get('question_types', None)}
max_per_page = _max_number_of_question_per_page(request.GET.get('number_of_questions_per_page', 0))
return _get_questions_based_on_filter(batch_id, filter_params), max_per_page
@permission_required('auth.can_view_batches')
def index(request, batch_id):
batch = Batch.objects.get(id=batch_id) if batch_id else None
question_filter_form = QuestionFilterForm(data=request.GET)
questions, max_per_page = _questions_given(batch_id, request)
if not questions:
messages.error(request, 'There are no questions associated with this batch yet.')
question_rules_for_batch = {}
if batch:
for question in questions:
question_rules_for_batch[question] = question.rules_for_batch(batch)
context = {'questions': questions, 'request': request, 'batch': batch, 'max_question_per_page':max_per_page,
'question_filter_form': question_filter_form, 'rules_for_batch': question_rules_for_batch}
return render(request, 'questions/index.html', context)
@permission_required('auth.can_view_batches')
def filter_by_group_and_module(request, batch_id, group_id, module_id):
filter_params = clean_query_params({'group__id': group_id, 'module__id': module_id})
questions = Question.objects.filter(**filter_params).exclude(batches__id=batch_id).values('id', 'text', 'answer_type').order_by('text')
json_dump = json.dumps(list(questions), cls=DjangoJSONEncoder)
return HttpResponse(json_dump, mimetype='application/json')
@permission_required('auth.can_view_batches')
def list_all_questions(request):
batch_id = request.GET.get('batch_id', None)
question_filter_form = QuestionFilterForm(data=request.GET)
questions, max_per_page = _questions_given(batch_id, request)
context = {'questions': questions, 'request': request, 'question_filter_form': question_filter_form,
'rules_for_batch': {}, 'max_question_per_page': max_per_page}
return render(request, 'questions/index.html', context)
def _sub_question_hash(sub_question):
return {'id': str(sub_question.id), 'text': sub_question.text}
def __process_sub_question_form(request, questionform, parent_question, action_performed, batch_id=None):
if questionform.is_valid():
redirect_url = '/batches/%s/questions/' % batch_id if batch_id else '/questions/'
sub_question = questionform.save(commit=False)
sub_question.subquestion = True
sub_question.parent = parent_question
sub_question.group = parent_question.group
sub_question.save()
if request.is_ajax():
return HttpResponse(json.dumps(_sub_question_hash(sub_question)))
else:
messages.success(request, 'Sub question successfully %s.' % action_performed)
return HttpResponseRedirect(redirect_url)
else:
messages.error(request, 'Sub question not saved.')
@permission_required('auth.can_view_batches')
@not_allowed_when_batch_is_open(message=ADD_SUBQUESTION_ON_OPEN_BATCH_ERROR_MESSAGE,
redirect_url_name="batch_questions_page", url_kwargs_keys=['batch_id'])
def new_subquestion(request, question_id, batch_id=None):
parent_question = Question.objects.get(pk=question_id)
questionform = QuestionForm(parent_question=parent_question)
response = None
if request.method == 'POST':
questionform = QuestionForm(request.POST, parent_question=parent_question)
response = __process_sub_question_form(request, questionform, parent_question, 'added', batch_id)
context = {'questionform': questionform, 'button_label': 'Create', 'id': 'add-sub_question-form',
'cancel_url': '/questions/', 'parent_question': parent_question, 'class': 'question-form',
'heading': 'Add SubQuestion'}
template_name = 'questions/new.html'
if request.is_ajax():
template_name = 'questions/_add_question.html'
return response or render(request, template_name, context)
@permission_required('auth.can_view_batches')
def edit_subquestion(request, question_id, batch_id=None):
question = Question.objects.get(pk=question_id)
questionform = QuestionForm(instance=question)
response = None
if request.method == 'POST':
questionform = QuestionForm(request.POST, instance=question)
response = __process_sub_question_form(request, questionform, question.parent, 'edited', batch_id)
context = {'questionform': questionform, 'button_label': 'Save', 'id': 'add-sub_question-form',
'cancel_url': '/questions/', 'parent_question': question.parent, 'class': 'question-form',
'heading': 'Edit Subquestion'}
template_name = 'questions/new.html'
return response or render(request, template_name, context)
def _get_post_values(post_data):
next_question_key = post_data.get('next_question', None)
option_key = post_data.get('option', None)
question_key = post_data.get('validate_with_question', None)
condition_response = post_data.get('condition', None)
value_key = post_data.get('value', None)
value_min_key = post_data.get('min_value', None)
value_max_key = post_data.get('max_value', None)
save_data = {'action': post_data['action'],
'condition': condition_response or 'EQUALS_OPTION',
'next_question': Question.objects.get(id=next_question_key) if next_question_key else None,
'validate_with_option': QuestionOption.objects.get(id=option_key) if option_key else None,
'validate_with_question': Question.objects.get(id=question_key) if question_key else None
}
if value_key:
save_data['validate_with_value'] = value_key
if value_min_key:
save_data['validate_with_min_value'] = value_min_key
if value_max_key:
save_data['validate_with_max_value'] = value_max_key
return save_data
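# Sketch of the mapping performed above (hypothetical payload values, not taken
# from the application): a POST like {'action': 'SKIP_TO', 'next_question': '12'}
# becomes {'action': 'SKIP_TO', 'condition': 'EQUALS_OPTION',
# 'next_question': <Question pk=12>, 'validate_with_option': None,
# 'validate_with_question': None}; the value / min_value / max_value keys are
# copied in only when present in the POST data.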
def _rule_exists(question, batch, **kwargs):
return AnswerRule.objects.filter(question=question, batch=batch, **kwargs).count() > 0
@permission_required('auth.can_view_batches')
@not_allowed_when_batch_is_open(message=ADD_LOGIC_ON_OPEN_BATCH_ERROR_MESSAGE,
redirect_url_name="batch_questions_page", url_kwargs_keys=['batch_id'])
def add_logic(request, batch_id, question_id):
question = Question.objects.get(id=question_id)
batch = Batch.objects.get(id=batch_id)
logic_form = LogicForm(question=question, batch=batch)
response = None
question_rules_for_batch = {}
question_rules_for_batch[question] = question.rules_for_batch(batch)
if request.method == "POST":
logic_form = LogicForm(data=request.POST, question=question, batch=batch)
if logic_form.is_valid():
AnswerRule.objects.create(question=question, batch=batch, **_get_post_values(request.POST))
messages.success(request, 'Logic successfully added.')
response = HttpResponseRedirect('/batches/%s/questions/' % batch_id)
context = {'logic_form': logic_form, 'button_label': 'Save', 'question': question,
'rules_for_batch': question_rules_for_batch,
'questionform': QuestionForm(parent_question=question),
'modal_action': '/questions/%s/sub_questions/new/' % question.id,
'class': 'question-form', 'batch_id': batch_id, 'batch': batch,
'cancel_url': '/batches/%s/questions/' % batch_id}
return response or render(request, "questions/logic.html", context)
@permission_required('auth.can_view_batches')
def new(request):
response, context = _render_question_view(request)
return response or render(request, 'questions/new.html', context)
def is_multichoice(request, question_id):
is_multichoice_type = False
question_options = []
try:
question = Question.objects.get(id=question_id)
is_multichoice_type = question.is_multichoice()
if is_multichoice_type:
all_options = question.options.all().order_by('order')
for option in all_options:
question_options.append({'id': option.id, 'text': option.text})
except Question.DoesNotExist:
pass
is_multichoice_question = [{'is_multichoice': is_multichoice_type, 'question_options': question_options}]
return HttpResponse(json.dumps(is_multichoice_question), mimetype='application/json')
@permission_required('auth.can_view_batches')
def edit(request, question_id):
question = Question.objects.filter(id=question_id)
if not question:
messages.error(request, "Question does not exist.")
return HttpResponseRedirect('/questions/')
response, context = _render_question_view(request, question[0])
return response or render(request, 'questions/new.html', context)
@permission_required('auth.can_view_batches')
def delete(request, question_id, batch_id=None):
question = Question.objects.filter(pk=question_id)
redirect_url = '/batches/%s/questions/' % batch_id if batch_id else '/questions/'
if question:
success_message = "%s successfully deleted."
messages.success(request, success_message % ("Sub question" if question[0].subquestion else "Question"))
else:
messages.error(request, "Question / Subquestion does not exist.")
question.delete()
return HttpResponseRedirect(redirect_url)
def _process_question_form(request, options, response, instance=None):
question_form = QuestionForm(data=request.POST, instance=instance)
action_str = 'edit' if instance else 'add'
if question_form.is_valid():
question_form.save(**request.POST)
messages.success(request, 'Question successfully %sed.' % action_str)
response = HttpResponseRedirect('/questions/')
else:
messages.error(request, 'Question was not %sed.' % action_str)
options = dict(request.POST).get('options', None)
return response, options, question_form
def _render_question_view(request, instance=None):
question_form = QuestionForm(instance=instance)
button_label = 'Create'
options = None
response = None
if instance:
button_label = 'Save'
options = instance.options.all()
options = [option.text for option in options] if options else None
if request.method == 'POST':
response, options, question_form = _process_question_form(request, options, response, instance)
context = {'button_label': button_label,
'id': 'add-question-form',
'request': request,
'class': 'question-form',
'cancel_url': '/questions/',
'questionform': question_form}
if options:
options = filter(lambda text: text.strip(), list(OrderedDict.fromkeys(options)))
options = map(lambda option: re.sub("[%s]" % Question.IGNORED_CHARACTERS, '', option), options)
context['options'] = map(lambda option: re.sub(" ", ' ', option), options)
return response, context
def _create_question_hash_response(questions):
questions_to_display = map(lambda question: {'id': str(question.id), 'text': question.text}, questions)
return HttpResponse(json.dumps(questions_to_display), mimetype='application/json')
def get_questions_for_batch(request, batch_id, question_id):
batch = Batch.objects.get(id=batch_id)
questions = batch.questions.filter(subquestion=False).exclude(id=question_id)
return _create_question_hash_response(questions)
def get_sub_questions_for_question(request, question_id):
question = Question.objects.get(id=question_id)
return _create_question_hash_response(question.get_subquestions())
@permission_required('auth.can_view_batches')
def delete_logic(request, batch_id, answer_rule_id):
rule = AnswerRule.objects.get(id=answer_rule_id)
rule.delete()
messages.success(request, "Logic successfully deleted.")
return HttpResponseRedirect('/batches/%s/questions/' % batch_id)
@permission_required('auth.can_view_batches')
@not_allowed_when_batch_is_open(message=REMOVE_QUESTION_FROM_OPEN_BATCH_ERROR_MESSAGE,
redirect_url_name="batch_questions_page", url_kwargs_keys=['batch_id'])
def remove(request, batch_id, question_id):
batch = Batch.objects.get(id=batch_id)
question = Question.objects.get(id=question_id, batches__id=batch_id)
AnswerRule.objects.filter(question=question, batch=batch).delete()
question.de_associate_from(batch)
messages.success(request, "Question successfully removed from %s." % batch.name)
return HttpResponseRedirect('/batches/%s/questions/' % batch_id)
@permission_required('auth.can_view_batches')
def export_all_questions(request):
return _export_questions(request)
@permission_required('auth.can_view_batches')
def export_batch_questions(request, batch_id):
batch = Batch.objects.get(id=batch_id)
return _export_questions(request, batch)
def _export_questions(request, batch=None):
filename = '%s_questions' % batch.name if batch else 'all_questions'
if request.method == 'POST':
formatted_responses = ExportQuestionsService(batch).formatted_responses()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % filename
response.write("\r\n".join(formatted_responses))
return response
referrer_url = request.META.get('HTTP_REFERER', None)
return HttpResponseRedirect(referrer_url)
|
|
# Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
The following constants identify various SSL session caching modes:
SESS_CACHE_OFF
SESS_CACHE_CLIENT
SESS_CACHE_SERVER
SESS_CACHE_BOTH
"""
import textwrap
import _forge_ssl # if we can't import it, let the error propagate
from _forge_ssl import SSLError
from _forge_ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _forge_ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _forge_ssl import SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH
from _forge_ssl import RAND_status, RAND_egd, RAND_add
from _forge_ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from socket import socket, _fileobject, _delegate_methods
from socket import error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, parent_socket, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23,
sess_cache_mode=SESS_CACHE_SERVER,
sess_id_ctx=None,
ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
# in the instance, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if certfile and not keyfile:
keyfile = certfile
create = True
connected = False
if not server_side:
# see if it's connected
try:
socket.getpeername(self)
connected = True
except socket_error, e:
if e.errno != errno.ENOTCONN:
raise
# no, no connection yet
self._sslobj = None
create = False
if create:
# yes, create the SSL object
if parent_socket == None:
self._sslobj = _forge_ssl.sslwrap(
self._sock,
server_side,
keyfile, certfile,
cert_reqs, ssl_version,
sess_cache_mode, sess_id_ctx,
ca_certs)
else:
self._sslobj = parent_socket._sslobj.wrap_accepted(self._sock)
if connected and do_handshake_on_connect:
self.do_handshake()
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.sess_cache_mode = sess_cache_mode
self.sess_id_ctx = sess_id_ctx
self.ca_certs = ca_certs
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def read(self, len=1024):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._sslobj.read(len)
except SSLError, x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
return ''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
return self._sslobj.peer_certificate(binary_form)
def cipher(self):
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def send(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
while True:
try:
v = self._sslobj.write(data)
except SSLError, x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return socket.send(self, data, flags)
def sendto(self, data, addr, flags=0):
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
else:
return socket.sendto(self, data, addr, flags)
def sendall(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return socket.recv(self, buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
tmp_buffer = self.read(nbytes)
v = len(tmp_buffer)
buffer[:v] = tmp_buffer
return v
else:
return socket.recv_into(self, buffer, nbytes, flags)
def recvfrom(self, addr, buflen=1024, flags=0):
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom(self, addr, buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom_into(self, buffer, nbytes, flags)
def pending(self):
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def unwrap(self):
if self._sslobj:
try:
# if connected then shutdown
self.getpeername()
s = self._sslobj.shutdown()
except:
s = self._sock
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def shutdown(self, how):
self._sslobj = None
socket.shutdown(self, how)
def close(self):
if self._makefile_refs < 1:
if self._sslobj:
self.unwrap()
socket.close(self)
else:
self._makefile_refs -= 1
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
self._sslobj.do_handshake()
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._sslobj:
raise ValueError("attempt to connect already-connected SSLSocket!")
socket.connect(self, addr)
self._sslobj = _forge_ssl.sslwrap(self._sock, False,
self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.sess_cache_mode,
self.sess_id_ctx,
self.ca_certs)
if self.do_handshake_on_connect:
self.do_handshake()
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
return (SSLSocket(self,
newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
sess_cache_mode=self.sess_cache_mode,
sess_id_ctx=self.sess_id_ctx,
ca_certs=self.ca_certs,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs),
addr)
def makefile(self, mode='r', bufsize=-1):
"""Make and return a file-like object that
works with the SSL connection. Just use the code
from the socket module."""
self._makefile_refs += 1
# close=True so as to decrement the reference count when done with
# the file-like object.
return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(sock, parent_socket=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23,
sess_cache_mode=SESS_CACHE_SERVER,
sess_id_ctx=None,
ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
return SSLSocket(parent_socket,
sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version,
sess_cache_mode=sess_cache_mode,
sess_id_ctx=sess_id_ctx,
ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs)
# some utility functions
def cert_time_to_seconds(cert_time):
"""Takes a date-time string in standard ASN1_print form
("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
a Python time value in seconds past the epoch."""
import time
return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
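# Usage note (not part of the original module): time.mktime interprets the
# parsed struct_time as *local* time, so the seconds returned for a string such
# as "May  9 00:00:00 2007 GMT" depend on the time zone of the machine doing
# the conversion.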
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
if hasattr(base64, 'standard_b64encode'):
# preferred because older API gets line-length wrong
f = base64.standard_b64encode(der_cert_bytes)
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
else:
return (PEM_HEADER + '\n' +
base64.encodestring(der_cert_bytes) +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodestring(d)
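# Round-trip sketch (illustrative only): for a well-formed certificate,
# DER_cert_to_PEM_cert(PEM_cert_to_DER_cert(pem)) reproduces pem up to
# line-wrapping differences, since both directions use base64 with the same
# header/footer markers.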
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if (ca_certs is not None):
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
s = wrap_socket(socket(), ssl_version=ssl_version,
cert_reqs=cert_reqs, ca_certs=ca_certs)
s.connect(addr)
dercert = s.getpeercert(True)
s.close()
return DER_cert_to_PEM_cert(dercert)
def get_protocol_name(protocol_code):
if protocol_code == PROTOCOL_TLSv1:
return "TLSv1"
elif protocol_code == PROTOCOL_SSLv23:
return "SSLv23"
elif protocol_code == PROTOCOL_SSLv2:
return "SSLv2"
elif protocol_code == PROTOCOL_SSLv3:
return "SSLv3"
else:
return "<unknown>"
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
for compatibility with Python 2.5 and earlier. Will disappear in
Python 3.0."""
if hasattr(sock, "_sock"):
sock = sock._sock
ssl_sock = _forge_ssl.sslwrap(sock, 0, keyfile, certfile,
CERT_NONE, PROTOCOL_SSLv23,
SESS_CACHE_SERVER, None, None)
try:
sock.getpeername()
except:
# no, no connection yet
pass
else:
# yes, do the handshake
ssl_sock.do_handshake()
return ssl_sock
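# Hedged client-side usage sketch (assumes a TLS server listening on
# localhost:4433 and a CA bundle at 'ca.pem'; neither is part of this module):
#
#   from socket import socket
#   s = wrap_socket(socket(), cert_reqs=CERT_REQUIRED, ca_certs='ca.pem')
#   s.connect(('localhost', 4433))
#   s.write('ping')
#   print s.getpeercert()
#   s.close()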
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
from ...extern.six.moves import range
from ... import units as u
from ..baseframe import frame_transform_graph
from ..attributes import TimeAttribute
from ..transformations import (FunctionTransformWithFiniteDifference,
FunctionTransform, DynamicMatrixTransform)
from ..representation import (CartesianRepresentation,
UnitSphericalRepresentation)
from .. import earth_orientation as earth
from .utils import EQUINOX_B1950
from .baseradec import _base_radec_docstring, BaseRADecFrame
class FK4(BaseRADecFrame):
"""
A coordinate or frame in the FK4 system.
Note that this is a barycentric version of FK4 - that is, the origin for
this frame is the Solar System Barycenter, *not* the Earth geocenter.
The frame attributes are listed under **Other Parameters**.
{params}
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
obstime : `~astropy.time.Time`
The time this frame was observed. If ``None``, will be the same as
``equinox``.
"""
equinox = TimeAttribute(default=EQUINOX_B1950)
obstime = TimeAttribute(default=None, secondary_attribute='equinox')
FK4.__doc__ = FK4.__doc__.format(params=_base_radec_docstring)
# the "self" transform
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4)
def fk4_to_fk4(fk4coord1, fk4frame2):
# deceptively complicated: need to transform to No E-terms FK4, precess, and
# then come back, because precession is non-trivial with E-terms
fnoe_w_eqx1 = fk4coord1.transform_to(FK4NoETerms(equinox=fk4coord1.equinox))
fnoe_w_eqx2 = fnoe_w_eqx1.transform_to(FK4NoETerms(equinox=fk4frame2.equinox))
return fnoe_w_eqx2.transform_to(fk4frame2)
class FK4NoETerms(BaseRADecFrame):
"""
A coordinate or frame in the FK4 system, but with the E-terms of aberration
removed.
The frame attributes are listed under **Other Parameters**.
{params}
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
obstime : `~astropy.time.Time`
The time this frame was observed. If ``None``, will be the same as
``equinox``.
"""
equinox = TimeAttribute(default=EQUINOX_B1950)
obstime = TimeAttribute(default=None, secondary_attribute='equinox')
@staticmethod
def _precession_matrix(oldequinox, newequinox):
"""
Compute and return the precession matrix for FK4 using Newcomb's method.
Used inside some of the transformation functions.
Parameters
----------
oldequinox : `~astropy.time.Time`
The equinox to precess from.
newequinox : `~astropy.time.Time`
The equinox to precess to.
Returns
-------
newcoord : array
The precession matrix to transform to the new equinox
"""
return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)
FK4NoETerms.__doc__ = FK4NoETerms.__doc__.format(params=_base_radec_docstring)
# the "self" transform
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK4NoETerms)
def fk4noe_to_fk4noe(fk4necoord1, fk4neframe2):
return fk4necoord1._precession_matrix(fk4necoord1.equinox, fk4neframe2.equinox)
# FK4-NO-E to/from FK4 ----------------------------->
# Unlike other frames, this module includes *two* frame classes for FK4
# coordinates - one including the E-terms of aberration (FK4), and
# one not including them (FK4NoETerms). The following functions
# implement the transformation between these two.
def fk4_e_terms(equinox):
"""
Return the e-terms of aberration vector
Parameters
----------
equinox : Time object
The equinox for which to compute the e-terms
"""
# Constant of aberration at J2000; from Explanatory Supplement to the
# Astronomical Almanac (Seidelmann, 2005).
k = 0.0056932 # in degrees (v_earth/c ~ 1e-4 rad ~ 0.0057 deg)
k = np.radians(k)
# Eccentricity of the Earth's orbit
e = earth.eccentricity(equinox.jd)
# Mean longitude of perigee of the solar orbit
g = earth.mean_lon_of_perigee(equinox.jd)
g = np.radians(g)
# Obliquity of the ecliptic
o = earth.obliquity(equinox.jd, algorithm=1980)
o = np.radians(o)
return e * k * np.sin(g), \
-e * k * np.cos(g) * np.cos(o), \
-e * k * np.cos(g) * np.sin(o)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4NoETerms)
def fk4_to_fk4_no_e(fk4coord, fk4noeframe):
# Extract cartesian vector
rep = fk4coord.cartesian
# Find distance (for re-normalization)
d_orig = rep.norm()
rep /= d_orig
# Apply E-terms of aberration. Note that this depends on the equinox (not
# the observing time/epoch) of the coordinates. See issue #1496 for a
# discussion of this.
eterms_a = CartesianRepresentation(
u.Quantity(fk4_e_terms(fk4coord.equinox), u.dimensionless_unscaled,
copy=False), copy=False)
rep = rep - eterms_a + eterms_a.dot(rep) * rep
# Find new distance (for re-normalization)
d_new = rep.norm()
# Renormalize
rep *= d_orig / d_new
# now re-cast into an appropriate Representation, and precess if need be
if isinstance(fk4coord.data, UnitSphericalRepresentation):
rep = rep.represent_as(UnitSphericalRepresentation)
# if no obstime was given in the new frame, use the old one for consistency
newobstime = fk4coord._obstime if fk4noeframe._obstime is None else fk4noeframe._obstime
fk4noe = FK4NoETerms(rep, equinox=fk4coord.equinox, obstime=newobstime)
if fk4coord.equinox != fk4noeframe.equinox:
# precession
fk4noe = fk4noe.transform_to(fk4noeframe)
return fk4noe
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4NoETerms, FK4)
def fk4_no_e_to_fk4(fk4noecoord, fk4frame):
# first precess, if necessary
if fk4noecoord.equinox != fk4frame.equinox:
fk4noe_w_fk4equinox = FK4NoETerms(equinox=fk4frame.equinox,
obstime=fk4noecoord.obstime)
fk4noecoord = fk4noecoord.transform_to(fk4noe_w_fk4equinox)
# Extract cartesian vector
rep = fk4noecoord.cartesian
# Find distance (for re-normalization)
d_orig = rep.norm()
rep /= d_orig
# Apply E-terms of aberration. Note that this depends on the equinox (not
# the observing time/epoch) of the coordinates. See issue #1496 for a
# discussion of this.
eterms_a = CartesianRepresentation(
u.Quantity(fk4_e_terms(fk4noecoord.equinox), u.dimensionless_unscaled,
copy=False), copy=False)
rep0 = rep.copy()
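# Invert the forward E-term correction by fixed-point iteration: the fixed
# point of rep = (eterms_a + rep0) / (1. + eterms_a.dot(rep)) satisfies
# rep0 = rep - eterms_a + eterms_a.dot(rep) * rep, i.e. the fk4 -> fk4noe
# formula above. The e-terms are of order 1e-6, so 10 iterations are ample.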
for _ in range(10):
rep = (eterms_a + rep0) / (1. + eterms_a.dot(rep))
# Find new distance (for re-normalization)
d_new = rep.norm()
# Renormalize
rep *= d_orig / d_new
# now re-cast into an appropriate Representation, and precess if need be
if isinstance(fk4noecoord.data, UnitSphericalRepresentation):
rep = rep.represent_as(UnitSphericalRepresentation)
return fk4frame.realize_frame(rep)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Add one or more `LinearOperators` efficiently."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.contrib.linalg.python.ops import linear_operator_full_matrix
from tensorflow.contrib.linalg.python.ops import linear_operator_identity
from tensorflow.contrib.linalg.python.ops import linear_operator_tril
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
__all__ = []
def add_operators(operators,
operator_name=None,
addition_tiers=None,
name=None):
"""Efficiently add one or more linear operators.
Given operators `[A1, A2,...]`, this `Op` returns a possibly shorter list of
operators `[B1, B2,...]` such that
```sum_k Ak.apply(x) = sum_k Bk.apply(x).```
The operators `Bk` result by adding some of the `Ak`, as allowed by
`addition_tiers`.
Example of efficient adding of diagonal operators.
```python
A1 = LinearOperatorDiag(diag=[1., 1.], name="A1")
A2 = LinearOperatorDiag(diag=[2., 2.], name="A2")
# Use two tiers, the first contains an Adder that returns Diag. Since both
# A1 and A2 are Diag, they can use this Adder. The second tier will not be
# used.
addition_tiers = [
[_AddAndReturnDiag()],
[_AddAndReturnMatrix()]]
B_list = add_operators([A1, A2], addition_tiers=addition_tiers)
len(B_list)
==> 1
B_list[0].__class__.__name__
==> 'LinearOperatorDiag'
B_list[0].to_dense()
==> [[3., 0.],
[0., 3.]]
B_list[0].name
==> 'Add/A1__A2/'
```
Args:
operators: Iterable of `LinearOperator` objects with same `dtype`, domain
and range dimensions, and broadcastable batch shapes.
operator_name: String name for returned `LinearOperator`. Defaults to
concatenation of "Add/A__B/" that indicates the order of addition steps.
addition_tiers: List tiers, like `[tier_0, tier_1, ...]`, where `tier_i`
is a list of `Adder` objects. This function attempts to do all additions
in tier `i` before trying tier `i + 1`.
name: A name for this `Op`. Defaults to `add_operators`.
Returns:
Subclass of `LinearOperator`. Class and order of addition may change as new
(and better) addition strategies emerge.
Raises:
ValueError: If `operators` argument is empty.
ValueError: If shapes are incompatible.
"""
# Default setting
if addition_tiers is None:
addition_tiers = _DEFAULT_ADDITION_TIERS
# Argument checking.
check_ops.assert_proper_iterable(operators)
operators = list(reversed(operators))
if len(operators) < 1:
raise ValueError(
"Argument 'operators' must contain at least one operator. "
"Found: %s" % operators)
if not all(
isinstance(op, linear_operator.LinearOperator) for op in operators):
raise TypeError(
"Argument 'operators' must contain only LinearOperator instances. "
"Found: %s" % operators)
_static_check_for_same_dimensions(operators)
_static_check_for_broadcastable_batch_shape(operators)
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
with ops.name_scope(name or "add_operators", values=graph_parents):
# Additions done in one of the tiers. Try tier 0, 1,...
ops_to_try_at_next_tier = list(operators)
for tier in addition_tiers:
ops_to_try_at_this_tier = ops_to_try_at_next_tier
ops_to_try_at_next_tier = []
while ops_to_try_at_this_tier:
op1 = ops_to_try_at_this_tier.pop()
op2, adder = _pop_a_match_at_tier(op1, ops_to_try_at_this_tier, tier)
if op2 is not None:
# Will try to add the result of this again at this same tier.
new_operator = adder.add(op1, op2, operator_name)
ops_to_try_at_this_tier.append(new_operator)
else:
ops_to_try_at_next_tier.append(op1)
return ops_to_try_at_next_tier
def _pop_a_match_at_tier(op1, operator_list, tier):
# Search from the back of list to the front in order to create nice default
# order of operations.
for i in range(1, len(operator_list) + 1):
op2 = operator_list[-i]
for adder in tier:
if adder.can_add(op1, op2):
return operator_list.pop(-i), adder
return None, None
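# Matching sketch (hypothetical operators): with operator_list = [B, C] the
# search starts from C, the back of the list; if some adder in the tier can
# combine op1 with C, C is popped and returned together with that adder,
# otherwise B is tried next. (None, None) means no adder in this tier can
# combine op1 with any remaining operator.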
def _infer_hints_allowing_override(op1, op2, hints):
"""Infer hints from op1 and op2. hints argument is an override.
Args:
op1: LinearOperator
op2: LinearOperator
hints: _Hints object holding "is_X" boolean hints to use for returned
operator.
If some hint is None, try to set using op1 and op2. If the
hint is provided, ignore op1 and op2 hints. This allows an override
of previous hints, but does not allow forbidden hints (e.g. you still
cannot say a real diagonal operator is not self-adjoint).
Returns:
_Hints object.
"""
hints = hints or _Hints()
# If A, B are self-adjoint, then so is A + B.
if hints.is_self_adjoint is None:
is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint
else:
is_self_adjoint = hints.is_self_adjoint
# If A, B are positive definite, then so is A + B.
if hints.is_positive_definite is None:
is_positive_definite = op1.is_positive_definite and op2.is_positive_definite
else:
is_positive_definite = hints.is_positive_definite
# A positive definite operator is always non-singular.
if is_positive_definite and hints.is_positive_definite is None:
is_non_singular = True
else:
is_non_singular = hints.is_non_singular
return _Hints(
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite)
def _static_check_for_same_dimensions(operators):
"""ValueError if operators determined to have different dimensions."""
if len(operators) < 2:
return
domain_dimensions = [(op.name, op.domain_dimension.value) for op in operators
if op.domain_dimension.value is not None]
if len(set(value for name, value in domain_dimensions)) > 1:
raise ValueError("Operators must have the same domain dimension. Found: %s"
% domain_dimensions)
range_dimensions = [(op.name, op.range_dimension.value) for op in operators
if op.range_dimension.value is not None]
if len(set(value for name, value in range_dimensions)) > 1:
raise ValueError("Operators must have the same range dimension. Found: %s" %
range_dimensions)
def _static_check_for_broadcastable_batch_shape(operators):
"""ValueError if operators determined to have non-broadcastable shapes."""
if len(operators) < 2:
return
# This will fail if they cannot be broadcast together.
batch_shape = operators[0].batch_shape
for op in operators[1:]:
batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape)
class _Hints(object):
"""Holds 'is_X' flags that every LinearOperator is initialized with."""
def __init__(self,
is_non_singular=None,
is_positive_definite=None,
is_self_adjoint=None):
self.is_non_singular = is_non_singular
self.is_positive_definite = is_positive_definite
self.is_self_adjoint = is_self_adjoint
################################################################################
# Classes to add two linear operators.
################################################################################
@six.add_metaclass(abc.ABCMeta)
class _Adder(object):
"""Abstract base class to add two operators.
Each `Adder` acts independently, adding everything it can, paying no attention
as to whether another `Adder` could have done the addition more efficiently.
"""
@property
def name(self):
return self.__class__.__name__
@abc.abstractmethod
def can_add(self, op1, op2):
"""Returns `True` if this `Adder` can add `op1` and `op2`. Else `False`."""
pass
@abc.abstractmethod
def _add(self, op1, op2, operator_name, hints):
# Derived classes can assume op1 and op2 have been validated, e.g. they have
# the same dtype, and their domain/range dimensions match.
pass
def add(self, op1, op2, operator_name, hints=None):
"""Return new `LinearOperator` acting like `op1 + op2`.
Args:
op1: `LinearOperator`
op2: `LinearOperator`, with `shape` and `dtype` such that adding to
`op1` is allowed.
operator_name: `String` name to give to returned `LinearOperator`
hints: `_Hints` object. Returned `LinearOperator` will be created with
these hints.
Returns:
`LinearOperator`
"""
updated_hints = _infer_hints_allowing_override(op1, op2, hints)
if operator_name is None:
operator_name = "Add/" + op1.name + "__" + op2.name + "/"
values = op1.graph_parents + op2.graph_parents
scope_name = self.name
if scope_name.startswith("_"):
scope_name = scope_name[1:]
with ops.name_scope(scope_name, values=values):
return self._add(op1, op2, operator_name, updated_hints)
class _AddAndReturnScaledIdentity(_Adder):
"""Handles additions resulting in an Identity family member.
The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family
is closed under addition. This `Adder` respects that, and returns an Identity
family member.
"""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
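# The set difference below is empty exactly when both operand types belong to
# the identity family, so the check reads "op1 and op2 are both Identity-like".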
return not types.difference(_IDENTITY_FAMILY)
def _add(self, op1, op2, operator_name, hints):
# Will build a LinearOperatorScaledIdentity.
if _type(op1) == _SCALED_IDENTITY:
multiplier_1 = op1.multiplier
else:
multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype)
if _type(op2) == _SCALED_IDENTITY:
multiplier_2 = op2.multiplier
else:
multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype)
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=op1.range_dimension_tensor(),
multiplier=multiplier_1 + multiplier_2,
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnDiag(_Adder):
"""Handles additions resulting in a Diag operator."""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_DIAG_LIKE)
def _add(self, op1, op2, operator_name, hints):
return linear_operator_diag.LinearOperatorDiag(
diag=op1.diag_part() + op2.diag_part(),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnTriL(_Adder):
"""Handles additions resulting in a TriL operator."""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_DIAG_LIKE.union({_TRIL}))
def _add(self, op1, op2, operator_name, hints):
if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
op_add_to_tensor, op_other = op1, op2
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_tril.LinearOperatorTriL(
tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnMatrix(_Adder):
""""Handles additions resulting in a `LinearOperatorFullMatrix`."""
def can_add(self, op1, op2): # pylint: disable=unused-argument
return isinstance(op1, linear_operator.LinearOperator) and isinstance(
op2, linear_operator.LinearOperator)
def _add(self, op1, op2, operator_name, hints):
if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
op_add_to_tensor, op_other = op1, op2
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_full_matrix.LinearOperatorFullMatrix(
matrix=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
################################################################################
# Constants designating types of LinearOperators
################################################################################
# Type name constants for LinearOperator classes.
_IDENTITY = "identity"
_SCALED_IDENTITY = "scaled_identity"
_DIAG = "diag"
_TRIL = "tril"
_MATRIX = "matrix"
# Groups of operators.
_DIAG_LIKE = {_DIAG, _IDENTITY, _SCALED_IDENTITY}
_IDENTITY_FAMILY = {_IDENTITY, _SCALED_IDENTITY}
# operators with an efficient .add_to_tensor() method.
_EFFICIENT_ADD_TO_TENSOR = _DIAG_LIKE
def _type(operator):
"""Returns the type name constant (e.g. _TRIL) for operator."""
if isinstance(operator, linear_operator_diag.LinearOperatorDiag):
return _DIAG
if isinstance(operator, linear_operator_tril.LinearOperatorTriL):
return _TRIL
if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix):
return _MATRIX
if isinstance(operator, linear_operator_identity.LinearOperatorIdentity):
return _IDENTITY
if isinstance(operator,
linear_operator_identity.LinearOperatorScaledIdentity):
return _SCALED_IDENTITY
raise TypeError("Operator type unknown: %s" % operator)
################################################################################
# Addition tiers:
# We attempt to use Adders in tier K before K+1.
#
# Organize tiers to
# (i) reduce O(..) complexity of forming final operator, and
# (ii) produce the "most efficient" final operator.
# Dev notes:
# * Results of addition at tier K will be added at tier K or higher.
# * Tiers may change, and we warn the user that they may change.
################################################################################
# Note that the final tier, _AddAndReturnMatrix, will convert everything to a
# dense matrix. So it is sometimes very inefficient.
_DEFAULT_ADDITION_TIERS = [
[_AddAndReturnScaledIdentity()],
[_AddAndReturnDiag()],
[_AddAndReturnTriL()],
[_AddAndReturnMatrix()],
]
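# Illustrative tier walk-through (hypothetical operators, in the spirit of the
# add_operators docstring example): adding [Identity, Diag, TriL] first
# collapses Identity + Diag at the _AddAndReturnDiag tier, then folds the
# resulting Diag into the TriL operator at the _AddAndReturnTriL tier, so a
# single TriL operator is returned without ever forming a dense matrix.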
|
|
import utils.decisions_constants as log
from mahjong.tile import TilesConverter
from mahjong.utils import is_chi, is_honor, is_man, is_pin, is_pon, is_sou, is_terminal, plus_dora, simplify
from utils.decisions_logger import MeldPrint
class BaseStrategy:
YAKUHAI = 0
HONITSU = 1
TANYAO = 2
FORMAL_TEMPAI = 3
CHINITSU = 4
COMMON_OPEN_TEMPAI = 6
TYPES = {
YAKUHAI: "Yakuhai",
HONITSU: "Honitsu",
TANYAO: "Tanyao",
FORMAL_TEMPAI: "Formal Tempai",
CHINITSU: "Chinitsu",
COMMON_OPEN_TEMPAI: "Common Open Tempai",
}
not_suitable_tiles = []
player = None
type = None
# number of shanten where we can start to open hand
min_shanten = 7
go_for_atodzuke = False
dora_count_total = 0
dora_count_central = 0
dora_count_not_central = 0
aka_dora_count = 0
dora_count_honor = 0
def __init__(self, strategy_type, player):
self.type = strategy_type
self.player = player
self.go_for_atodzuke = False
def __str__(self):
return self.TYPES[self.type]
def get_open_hand_han(self):
return 0
def should_activate_strategy(self, tiles_136, meld_tile=None):
"""
Based on the player's hand and the table situation
we can determine whether we should use this strategy or not.
:param tiles_136: tiles in 136 format
:return: boolean
"""
self.calculate_dora_count(tiles_136)
return True
def can_meld_into_agari(self):
"""
Is melding into agari allowed with this strategy
:return: boolean
"""
# By default, the logic is the following: if we have any
# non-suitable tiles, we can meld into agari state, because we'll
# throw them away after meld.
# Otherwise, there is no point.
for tile in self.player.tiles:
if not self.is_tile_suitable(tile):
return True
return False
def is_tile_suitable(self, tile):
"""
Can tile be used for open hand strategy or not
:param tile: in 136 tiles format
:return: boolean
"""
raise NotImplementedError()
def determine_what_to_discard(self, discard_options, hand, open_melds):
first_option = sorted(discard_options, key=lambda x: x.shanten)[0]
shanten = first_option.shanten
# for riichi we don't need to discard useful tiles
if shanten == 0 and not self.player.is_open_hand:
return discard_options
        # mark all tiles that are not suitable as ready to be discarded,
        # even if uke-ire says they should not be discarded
for x in discard_options:
if not self.is_tile_suitable(x.tile_to_discard_136):
x.had_to_be_discarded = True
return discard_options
def try_to_call_meld(self, tile, is_kamicha_discard, new_tiles):
"""
        Determine whether we should call a meld or not.
        If yes, return a MeldPrint object and the tile to discard.
        :param tile: 136 format tile
        :param is_kamicha_discard: boolean
        :param new_tiles: player's tiles including the called tile
        :return: MeldPrint and DiscardOption objects
"""
if self.player.in_riichi:
return None, None
closed_hand = self.player.closed_hand[:]
# we can't open hand anymore
if len(closed_hand) == 1:
return None, None
# we can't use this tile for our chosen strategy
if not self.is_tile_suitable(tile):
return None, None
discarded_tile = tile // 4
closed_hand_34 = TilesConverter.to_34_array(closed_hand + [tile])
combinations = []
first_index = 0
second_index = 0
if is_man(discarded_tile):
first_index = 0
second_index = 8
elif is_pin(discarded_tile):
first_index = 9
second_index = 17
elif is_sou(discarded_tile):
first_index = 18
second_index = 26
if second_index == 0:
# honor tiles
if closed_hand_34[discarded_tile] == 3:
combinations = [[[discarded_tile] * 3]]
else:
            # to avoid unnecessary calculations
            # we only need to check tiles within +-2 of the discarded tile
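            # e.g. for a discarded 5p (34-index 13) we get first_limit == 11 and
            # second_limit == 15, so only combinations within 3p..7p are considered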
first_limit = discarded_tile - 2
if first_limit < first_index:
first_limit = first_index
second_limit = discarded_tile + 2
if second_limit > second_index:
second_limit = second_index
combinations = self.player.ai.hand_divider.find_valid_combinations(
closed_hand_34, first_limit, second_limit, True
)
if combinations:
combinations = combinations[0]
possible_melds = []
for best_meld_34 in combinations:
# we can call pon from everyone
if is_pon(best_meld_34) and discarded_tile in best_meld_34:
if best_meld_34 not in possible_melds:
possible_melds.append(best_meld_34)
# we can call chi only from left player
if is_chi(best_meld_34) and is_kamicha_discard and discarded_tile in best_meld_34:
if best_meld_34 not in possible_melds:
possible_melds.append(best_meld_34)
# we can call melds only with allowed tiles
validated_melds = []
for meld in possible_melds:
if (
self.is_tile_suitable(meld[0] * 4)
and self.is_tile_suitable(meld[1] * 4)
and self.is_tile_suitable(meld[2] * 4)
):
validated_melds.append(meld)
possible_melds = validated_melds
if not possible_melds:
return None, None
chosen_meld_dict = self._find_best_meld_to_open(tile, possible_melds, new_tiles, closed_hand, tile)
# we didn't find a good discard candidate after open meld
if not chosen_meld_dict:
return None, None
selected_tile = chosen_meld_dict["discard_tile"]
meld = chosen_meld_dict["meld"]
shanten = selected_tile.shanten
had_to_be_called = self.meld_had_to_be_called(tile)
had_to_be_called = had_to_be_called or selected_tile.had_to_be_discarded
# each strategy can use their own value to min shanten number
if shanten > self.min_shanten:
self.player.logger.debug(
log.MELD_DEBUG,
"After meld shanten is too high for our strategy. Abort melding.",
)
return None, None
# sometimes we had to call tile, even if it will not improve our hand
# otherwise we can call only with improvements of shanten
if not had_to_be_called and shanten >= self.player.ai.shanten:
self.player.logger.debug(
log.MELD_DEBUG,
"Meld is not improving hand shanten. Abort melding.",
)
return None, None
if not self.validate_meld(chosen_meld_dict):
self.player.logger.debug(
log.MELD_DEBUG,
"Meld is suitable for strategy logic. Abort melding.",
)
return None, None
if not self.should_push_against_threats(chosen_meld_dict):
self.player.logger.debug(
log.MELD_DEBUG,
"Meld is too dangerous to call. Abort melding.",
)
return None, None
return meld, selected_tile
def should_push_against_threats(self, chosen_meld_dict) -> bool:
selected_tile = chosen_meld_dict["discard_tile"]
if selected_tile.shanten <= 1:
return True
threats = self.player.ai.defence.get_threatening_players()
if not threats:
return True
# don't open garbage hand against threats
if selected_tile.shanten >= 3:
return False
tile_136 = selected_tile.tile_to_discard_136
if len(threats) == 1:
threat_hand_cost = threats[0].get_assumed_hand_cost(tile_136)
# expensive threat
# and our hand is not good
# let's not open this
if threat_hand_cost >= 7700:
return False
else:
min_threat_hand_cost = min([x.get_assumed_hand_cost(tile_136) for x in threats])
# 2+ threats
# and they are not cheap
# so, let's skip opening of bad hand
if min_threat_hand_cost >= 5200:
return False
return True
def validate_meld(self, chosen_meld_dict):
"""
        In some cases we want to additionally check that the meld is suitable for the strategy
"""
if self.player.is_open_hand:
return True
if not self.player.ai.placement.is_oorasu:
return True
        # we don't care about insufficient hand cost if we are the dealer
if self.player.is_dealer:
return True
placement = self.player.ai.placement.get_current_placement()
if not placement:
return True
needed_cost = self.player.ai.placement.get_minimal_cost_needed_considering_west(placement=placement)
if needed_cost <= 1000:
return True
selected_tile = chosen_meld_dict["discard_tile"]
if selected_tile.ukeire == 0:
self.player.logger.debug(
log.MELD_DEBUG, "We need to get out of 4th place, but this meld leaves us with zero ukeire"
)
return False
logger_context = {
"placement": placement,
"meld": chosen_meld_dict,
"needed_cost": needed_cost,
}
if selected_tile.shanten == 0:
if not selected_tile.tempai_descriptor:
return True
# tempai has special logger context
logger_context = {
"placement": placement,
"meld": chosen_meld_dict,
"needed_cost": needed_cost,
"tempai_descriptor": selected_tile.tempai_descriptor,
}
if selected_tile.tempai_descriptor["hand_cost"]:
hand_cost = selected_tile.tempai_descriptor["hand_cost"]
else:
hand_cost = selected_tile.tempai_descriptor["cost_x_ukeire"] / selected_tile.ukeire
# optimistic condition - direct ron
if hand_cost * 2 < needed_cost:
self.player.logger.debug(
log.MELD_DEBUG, "No chance to comeback from 4th with this meld, so keep hand closed", logger_context
)
return False
elif selected_tile.shanten == 1:
if selected_tile.average_second_level_cost is None:
return True
# optimistic condition - direct ron
if selected_tile.average_second_level_cost * 2 < needed_cost:
self.player.logger.debug(
log.MELD_DEBUG, "No chance to comeback from 4th with this meld, so keep hand closed", logger_context
)
return False
else:
simple_han_scale = [0, 1000, 2000, 3900, 7700, 8000, 12000, 12000]
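            # Rough non-dealer ron values indexed by han count: 3 han ~ 3900,
            # 5 han ~ mangan (8000), 6-7 han ~ haneman (12000); the "* 2" check
            # below models the optimistic best case of a direct hit on the opponent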
num_han = self.get_open_hand_han() + self.dora_count_total
if num_han < len(simple_han_scale):
hand_cost = simple_han_scale[num_han]
# optimistic condition - direct ron
if hand_cost * 2 < needed_cost:
self.player.logger.debug(
log.MELD_DEBUG,
"No chance to comeback from 4th with this meld, so keep hand closed",
logger_context,
)
return False
self.player.logger.debug(log.MELD_DEBUG, "This meld should allow us to comeback from 4th", logger_context)
return True
def meld_had_to_be_called(self, tile):
"""
        For special cases the meld has to be called even if the shanten number will not be improved
:param tile: in 136 tiles format
:return: boolean
"""
return False
def calculate_dora_count(self, tiles_136):
self.dora_count_central = 0
self.dora_count_not_central = 0
self.aka_dora_count = 0
for tile_136 in tiles_136:
tile_34 = tile_136 // 4
dora_count = plus_dora(
tile_136, self.player.table.dora_indicators, add_aka_dora=self.player.table.has_aka_dora
)
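            # plus_dora counts how many dora this tile is worth given the current
            # indicators (e.g. a 5m indicator makes 6m the dora); with add_aka_dora
            # red fives are counted as well (assumption based on the mahjong library)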
if not dora_count:
continue
if is_honor(tile_34):
self.dora_count_not_central += dora_count
self.dora_count_honor += dora_count
elif is_terminal(tile_34):
self.dora_count_not_central += dora_count
else:
self.dora_count_central += dora_count
self.dora_count_central += self.aka_dora_count
self.dora_count_total = self.dora_count_central + self.dora_count_not_central
def _find_best_meld_to_open(self, call_tile_136, possible_melds, new_tiles, closed_hand, discarded_tile):
all_tiles_are_suitable = True
for tile_136 in closed_hand:
all_tiles_are_suitable &= self.is_tile_suitable(tile_136)
final_results = []
for meld_34 in possible_melds:
# in order to fully emulate the possible hand with meld, we save original melds state,
# modify player's melds and then restore original melds state after everything is done
melds_original = self.player.melds[:]
tiles_original = self.player.tiles[:]
tiles = self._find_meld_tiles(closed_hand, meld_34, discarded_tile)
meld = MeldPrint()
meld.type = is_chi(meld_34) and MeldPrint.CHI or MeldPrint.PON
meld.tiles = sorted(tiles)
self.player.logger.debug(
log.MELD_HAND, f"Hand: {self._format_hand_for_print(closed_hand, discarded_tile, self.player.melds)}"
)
# update player hand state to emulate new situation and choose what to discard
self.player.tiles = new_tiles[:]
self.player.add_called_meld(meld)
selected_tile = self.player.ai.hand_builder.choose_tile_to_discard(after_meld=True)
# restore original tiles and melds state
self.player.tiles = tiles_original
self.player.melds = melds_original
# we can't find a good discard candidate, so let's skip this
if not selected_tile:
self.player.logger.debug(log.MELD_DEBUG, "Can't find discard candidate after meld. Abort melding.")
continue
if not all_tiles_are_suitable and self.is_tile_suitable(selected_tile.tile_to_discard_136):
self.player.logger.debug(
log.MELD_DEBUG,
"We have tiles in our hand that are not suitable to current strategy, "
"but we are going to discard tile that we need. Abort melding.",
)
continue
call_tile_34 = call_tile_136 // 4
# we can't discard the same tile that we called
if selected_tile.tile_to_discard_34 == call_tile_34:
self.player.logger.debug(
log.MELD_DEBUG, "We can't discard same tile that we used for meld. Abort melding."
)
continue
# we can't discard tile from the other end of the same ryanmen that we called
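            # e.g. calling chi on a discarded 6s with 4s+5s from hand and then
            # immediately discarding 3s would be kuikae (swap calling), which the
            # check below rejects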
if not is_honor(selected_tile.tile_to_discard_34) and meld.type == MeldPrint.CHI:
if is_sou(selected_tile.tile_to_discard_34) and is_sou(call_tile_34):
same_suit = True
elif is_man(selected_tile.tile_to_discard_34) and is_man(call_tile_34):
same_suit = True
elif is_pin(selected_tile.tile_to_discard_34) and is_pin(call_tile_34):
same_suit = True
else:
same_suit = False
if same_suit:
simplified_meld_0 = simplify(meld.tiles[0] // 4)
simplified_meld_1 = simplify(meld.tiles[1] // 4)
simplified_call = simplify(call_tile_34)
simplified_discard = simplify(selected_tile.tile_to_discard_34)
kuikae = False
if simplified_discard == simplified_call - 3:
kuikae_set = [simplified_call - 1, simplified_call - 2]
if simplified_meld_0 in kuikae_set and simplified_meld_1 in kuikae_set:
kuikae = True
elif simplified_discard == simplified_call + 3:
kuikae_set = [simplified_call + 1, simplified_call + 2]
if simplified_meld_0 in kuikae_set and simplified_meld_1 in kuikae_set:
kuikae = True
if kuikae:
tile_str = TilesConverter.to_one_line_string(
[selected_tile.tile_to_discard_136], print_aka_dora=self.player.table.has_aka_dora
)
self.player.logger.debug(
log.MELD_DEBUG,
f"Kuikae discard {tile_str} candidate. Abort melding.",
)
continue
final_results.append(
{
"discard_tile": selected_tile,
"meld_print": TilesConverter.to_one_line_string([meld_34[0] * 4, meld_34[1] * 4, meld_34[2] * 4]),
"meld": meld,
}
)
if not final_results:
self.player.logger.debug(log.MELD_DEBUG, "There are no good discards after melding.")
return None
final_results = sorted(
final_results,
key=lambda x: (x["discard_tile"].shanten, -x["discard_tile"].ukeire, x["discard_tile"].valuation),
)
self.player.logger.debug(
log.MELD_PREPARE,
"Tiles could be used for open meld",
context=final_results,
)
return final_results[0]
@staticmethod
def _find_meld_tiles(closed_hand, meld_34, discarded_tile):
discarded_tile_34 = discarded_tile // 4
meld_34_copy = meld_34[:]
closed_hand_copy = closed_hand[:]
meld_34_copy.remove(discarded_tile_34)
first_tile = TilesConverter.find_34_tile_in_136_array(meld_34_copy[0], closed_hand_copy)
closed_hand_copy.remove(first_tile)
second_tile = TilesConverter.find_34_tile_in_136_array(meld_34_copy[1], closed_hand_copy)
closed_hand_copy.remove(second_tile)
tiles = [first_tile, second_tile, discarded_tile]
return tiles
def _format_hand_for_print(self, tiles, new_tile, melds):
tiles_string = TilesConverter.to_one_line_string(tiles, print_aka_dora=self.player.table.has_aka_dora)
tile_string = TilesConverter.to_one_line_string([new_tile], print_aka_dora=self.player.table.has_aka_dora)
hand_string = f"{tiles_string} + {tile_string}"
hand_string += " [{}]".format(
", ".join(
[
TilesConverter.to_one_line_string(x.tiles, print_aka_dora=self.player.table.has_aka_dora)
for x in melds
]
)
)
return hand_string
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'config_dialog.ui'
#
# Created: Mon Jun 13 02:31:42 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_config_dialog(object):
def setupUi(self, config_dialog):
config_dialog.setObjectName(_fromUtf8("config_dialog"))
config_dialog.resize(431, 393)
self.gridLayout = QtGui.QGridLayout(config_dialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.groupBox_3 = QtGui.QGroupBox(config_dialog)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout_4 = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout_6 = QtGui.QVBoxLayout()
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.label_9 = QtGui.QLabel(self.groupBox_3)
self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.verticalLayout_6.addWidget(self.label_9)
self.label_10 = QtGui.QLabel(self.groupBox_3)
self.label_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.verticalLayout_6.addWidget(self.label_10)
self.label_11 = QtGui.QLabel(self.groupBox_3)
self.label_11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.verticalLayout_6.addWidget(self.label_11)
self.label_12 = QtGui.QLabel(self.groupBox_3)
self.label_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.verticalLayout_6.addWidget(self.label_12)
self.horizontalLayout_3.addLayout(self.verticalLayout_6)
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.chainComboBox = QtGui.QComboBox(self.groupBox_3)
self.chainComboBox.setObjectName(_fromUtf8("chainComboBox"))
self.chainComboBox.addItem(_fromUtf8(""))
self.chainComboBox.addItem(_fromUtf8(""))
self.verticalLayout_5.addWidget(self.chainComboBox)
self.channameLineEdit = QtGui.QLineEdit(self.groupBox_3)
self.channameLineEdit.setObjectName(_fromUtf8("channameLineEdit"))
self.verticalLayout_5.addWidget(self.channameLineEdit)
self.feeDoubleSpinBox = QtGui.QDoubleSpinBox(self.groupBox_3)
self.feeDoubleSpinBox.setDecimals(8)
self.feeDoubleSpinBox.setSingleStep(1e-06)
self.feeDoubleSpinBox.setObjectName(_fromUtf8("feeDoubleSpinBox"))
self.verticalLayout_5.addWidget(self.feeDoubleSpinBox)
self.minconfSpinBox = QtGui.QSpinBox(self.groupBox_3)
self.minconfSpinBox.setObjectName(_fromUtf8("minconfSpinBox"))
self.verticalLayout_5.addWidget(self.minconfSpinBox)
self.horizontalLayout_3.addLayout(self.verticalLayout_5)
self.gridLayout_4.addLayout(self.horizontalLayout_3, 0, 0, 1, 1)
self.verticalLayout_7.addWidget(self.groupBox_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.groupBox = QtGui.QGroupBox(config_dialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.verticalLayout.addWidget(self.label_4)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.btcuserLineEdit = QtGui.QLineEdit(self.groupBox)
self.btcuserLineEdit.setObjectName(_fromUtf8("btcuserLineEdit"))
self.verticalLayout_2.addWidget(self.btcuserLineEdit)
self.btcpswdLineEdit = QtGui.QLineEdit(self.groupBox)
self.btcpswdLineEdit.setObjectName(_fromUtf8("btcpswdLineEdit"))
self.verticalLayout_2.addWidget(self.btcpswdLineEdit)
self.btchostLineEdit = QtGui.QLineEdit(self.groupBox)
self.btchostLineEdit.setObjectName(_fromUtf8("btchostLineEdit"))
self.verticalLayout_2.addWidget(self.btchostLineEdit)
self.btcportSpinBox = QtGui.QSpinBox(self.groupBox)
self.btcportSpinBox.setMaximum(99999)
self.btcportSpinBox.setObjectName(_fromUtf8("btcportSpinBox"))
self.verticalLayout_2.addWidget(self.btcportSpinBox)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.gridLayout_2.addLayout(self.horizontalLayout, 0, 0, 1, 1)
self.horizontalLayout_4.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(config_dialog)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.verticalLayout_3.addWidget(self.label_5)
self.label_6 = QtGui.QLabel(self.groupBox_2)
self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.verticalLayout_3.addWidget(self.label_6)
self.label_7 = QtGui.QLabel(self.groupBox_2)
self.label_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.verticalLayout_3.addWidget(self.label_7)
self.label_8 = QtGui.QLabel(self.groupBox_2)
self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.verticalLayout_3.addWidget(self.label_8)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.bmuserLineEdit = QtGui.QLineEdit(self.groupBox_2)
self.bmuserLineEdit.setObjectName(_fromUtf8("bmuserLineEdit"))
self.verticalLayout_4.addWidget(self.bmuserLineEdit)
self.bmpswdLineEdit = QtGui.QLineEdit(self.groupBox_2)
self.bmpswdLineEdit.setObjectName(_fromUtf8("bmpswdLineEdit"))
self.verticalLayout_4.addWidget(self.bmpswdLineEdit)
self.bmhostLineEdit = QtGui.QLineEdit(self.groupBox_2)
self.bmhostLineEdit.setObjectName(_fromUtf8("bmhostLineEdit"))
self.verticalLayout_4.addWidget(self.bmhostLineEdit)
self.bmportSpinBox = QtGui.QSpinBox(self.groupBox_2)
self.bmportSpinBox.setMaximum(99999)
self.bmportSpinBox.setObjectName(_fromUtf8("bmportSpinBox"))
self.verticalLayout_4.addWidget(self.bmportSpinBox)
self.horizontalLayout_2.addLayout(self.verticalLayout_4)
self.gridLayout_3.addLayout(self.horizontalLayout_2, 0, 0, 1, 1)
self.horizontalLayout_4.addWidget(self.groupBox_2)
self.verticalLayout_7.addLayout(self.horizontalLayout_4)
self.gridLayout.addLayout(self.verticalLayout_7, 0, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(config_dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.label_9.setBuddy(self.chainComboBox)
self.label_10.setBuddy(self.channameLineEdit)
self.label_11.setBuddy(self.feeDoubleSpinBox)
self.label_12.setBuddy(self.minconfSpinBox)
self.label.setBuddy(self.btcuserLineEdit)
self.label_2.setBuddy(self.btcpswdLineEdit)
self.label_3.setBuddy(self.btchostLineEdit)
self.label_4.setBuddy(self.btcportSpinBox)
self.label_5.setBuddy(self.bmuserLineEdit)
self.label_6.setBuddy(self.bmpswdLineEdit)
self.label_7.setBuddy(self.bmhostLineEdit)
self.label_8.setBuddy(self.bmportSpinBox)
self.retranslateUi(config_dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), config_dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), config_dialog.reject)
QtCore.QMetaObject.connectSlotsByName(config_dialog)
config_dialog.setTabOrder(self.chainComboBox, self.channameLineEdit)
config_dialog.setTabOrder(self.channameLineEdit, self.feeDoubleSpinBox)
config_dialog.setTabOrder(self.feeDoubleSpinBox, self.minconfSpinBox)
config_dialog.setTabOrder(self.minconfSpinBox, self.btcuserLineEdit)
config_dialog.setTabOrder(self.btcuserLineEdit, self.btcpswdLineEdit)
config_dialog.setTabOrder(self.btcpswdLineEdit, self.btchostLineEdit)
config_dialog.setTabOrder(self.btchostLineEdit, self.btcportSpinBox)
config_dialog.setTabOrder(self.btcportSpinBox, self.bmuserLineEdit)
config_dialog.setTabOrder(self.bmuserLineEdit, self.bmpswdLineEdit)
config_dialog.setTabOrder(self.bmpswdLineEdit, self.bmhostLineEdit)
config_dialog.setTabOrder(self.bmhostLineEdit, self.bmportSpinBox)
config_dialog.setTabOrder(self.bmportSpinBox, self.buttonBox)
def retranslateUi(self, config_dialog):
config_dialog.setWindowTitle(_translate("config_dialog", "METAbuyer-Qt Settings", None))
self.groupBox_3.setTitle(_translate("config_dialog", "Configuration:", None))
self.label_9.setText(_translate("config_dialog", "Bitcoin Chain", None))
self.label_10.setText(_translate("config_dialog", "Channel", None))
self.label_11.setText(_translate("config_dialog", "Default Fee", None))
self.label_12.setText(_translate("config_dialog", "Minimum Confirms", None))
self.chainComboBox.setItemText(0, _translate("config_dialog", "mainnet", None))
self.chainComboBox.setItemText(1, _translate("config_dialog", "testnet", None))
self.groupBox.setTitle(_translate("config_dialog", "Bitcoin Core RPC:", None))
self.label.setText(_translate("config_dialog", "User", None))
self.label_2.setText(_translate("config_dialog", "Password", None))
self.label_3.setText(_translate("config_dialog", "Host", None))
self.label_4.setText(_translate("config_dialog", "Port", None))
self.groupBox_2.setTitle(_translate("config_dialog", "Bitmessage RPC:", None))
self.label_5.setText(_translate("config_dialog", "User", None))
self.label_6.setText(_translate("config_dialog", "Password", None))
self.label_7.setText(_translate("config_dialog", "Host", None))
self.label_8.setText(_translate("config_dialog", "Port", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
config_dialog = QtGui.QDialog()
ui = Ui_config_dialog()
ui.setupUi(config_dialog)
config_dialog.show()
sys.exit(app.exec_())
|
|
# -*- coding: utf-8 -*-
"""
gspread.models
~~~~~~~~~~~~~~
This module contains common spreadsheets' models
"""
import re
from collections import defaultdict
from itertools import chain
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from .ns import _ns, _ns1, ATOM_NS, BATCH_NS, SPREADSHEET_NS
from .urls import construct_url
from .utils import finditem, numericise_all
from .exceptions import IncorrectCellLabel, WorksheetNotFound, CellNotFound
try:
unicode
except NameError:
basestring = unicode = str
# Patch ElementTree._escape_attrib
_elementtree_escape_attrib = ElementTree._escape_attrib
def _escape_attrib(text, encoding=None, replace=None):
try:
text = _elementtree_escape_attrib(text)
except TypeError as e:
if str(e) == '_escape_attrib() takes exactly 2 arguments (1 given)':
text = _elementtree_escape_attrib(text, encoding)
    entities = {'\n': '
', '\r': '
', '\t': '	'}
for key, value in entities.items():
text = text.replace(key, value)
return text
ElementTree._escape_attrib = _escape_attrib
class Spreadsheet(object):
""" A class for a spreadsheet object."""
def __init__(self, client, feed_entry):
self.client = client
id_parts = feed_entry.find(_ns('id')).text.split('/')
self.id = id_parts[-1]
self._sheet_list = []
def get_id_fields(self):
return {'spreadsheet_id': self.id}
def _fetch_sheets(self):
feed = self.client.get_worksheets_feed(self)
for elem in feed.findall(_ns('entry')):
self._sheet_list.append(Worksheet(self, elem))
def add_worksheet(self, title, rows, cols):
"""Adds a new worksheet to a spreadsheet.
:param title: A title of a new worksheet.
:param rows: Number of rows.
:param cols: Number of columns.
        Returns a newly created :class:`worksheet <Worksheet>`.
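        Example. Adding a worksheet with 100 rows and 20 columns:
        >>> wks = sht.add_worksheet('Annual report', 100, 20)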
"""
feed = Element('entry', {'xmlns': ATOM_NS,
'xmlns:gs': SPREADSHEET_NS})
SubElement(feed, 'title').text = title
SubElement(feed, 'gs:rowCount').text = str(rows)
SubElement(feed, 'gs:colCount').text = str(cols)
url = construct_url('worksheets', self)
elem = self.client.post_feed(url, ElementTree.tostring(feed))
worksheet = Worksheet(self, elem)
self._sheet_list.append(worksheet)
return worksheet
def del_worksheet(self, worksheet):
"""Deletes a worksheet from a spreadsheet.
:param worksheet: The worksheet to be deleted.
"""
self.client.del_worksheet(worksheet)
self._sheet_list.remove(worksheet)
def worksheets(self):
"""Returns a list of all :class:`worksheets <Worksheet>`
in a spreadsheet.
"""
if not self._sheet_list:
self._fetch_sheets()
return self._sheet_list[:]
def worksheet(self, title):
"""Returns a worksheet with specified `title`.
        The returned object is an instance of :class:`Worksheet`.
        :param title: A title of a worksheet. If there are multiple
                      worksheets with the same title, the first one will
                      be returned.
Example. Getting worksheet named 'Annual bonuses'
>>> sht = client.open('Sample one')
>>> worksheet = sht.worksheet('Annual bonuses')
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return finditem(lambda x: x.title == title, self._sheet_list)
except StopIteration:
raise WorksheetNotFound(title)
def get_worksheet(self, index):
"""Returns a worksheet with specified `index`.
        The returned object is an instance of :class:`Worksheet`.
:param index: An index of a worksheet. Indexes start from zero.
Example. To get first worksheet of a spreadsheet:
>>> sht = client.open('My fancy spreadsheet')
>>> worksheet = sht.get_worksheet(0)
Returns `None` if the worksheet is not found.
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return self._sheet_list[index]
except IndexError:
return None
@property
def sheet1(self):
"""Shortcut property for getting the first worksheet."""
return self.get_worksheet(0)
class Worksheet(object):
"""A class for worksheet object."""
def __init__(self, spreadsheet, element):
self.spreadsheet = spreadsheet
self.client = spreadsheet.client
self._id = element.find(_ns('id')).text.split('/')[-1]
self._title = element.find(_ns('title')).text
self._element = element
self.version = self._get_link(
'edit', element).get('href').split('/')[-1]
def __repr__(self):
return '<%s %s id:%s>' % (self.__class__.__name__,
repr(self.title),
self.id)
@property
def id(self):
"""Id of a worksheet."""
return self._id
@property
def title(self):
"""Title of a worksheet."""
return self._title
@property
def row_count(self):
"""Number of rows"""
return int(self._element.find(_ns1('rowCount')).text)
@property
def col_count(self):
"""Number of columns"""
return int(self._element.find(_ns1('colCount')).text)
@property
def updated(self):
"""Updated time in RFC 3339 format"""
return self._element.find(_ns('updated')).text
def get_id_fields(self):
return {'spreadsheet_id': self.spreadsheet.id,
'worksheet_id': self.id}
def _cell_addr(self, row, col):
return 'R%sC%s' % (row, col)
def _get_link(self, link_type, feed):
return finditem(lambda x: x.get('rel') == link_type,
feed.findall(_ns('link')))
def _fetch_cells(self):
feed = self.client.get_cells_feed(self)
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
_MAGIC_NUMBER = 64
_cell_addr_re = re.compile(r'([A-Za-z]+)(\d+)')
def get_int_addr(self, label):
"""Translates cell's label address to a tuple of integers.
The result is a tuple containing `row` and `column` numbers.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.get_int_addr('A1')
(1, 1)
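        >>> wks.get_int_addr('AA3')
        (3, 27)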
"""
m = self._cell_addr_re.match(label)
if m:
column_label = m.group(1).upper()
row = int(m.group(2))
col = 0
for i, c in enumerate(reversed(column_label)):
col += (ord(c) - self._MAGIC_NUMBER) * (26 ** i)
else:
raise IncorrectCellLabel(label)
return (row, col)
def get_addr_int(self, row, col):
"""Translates cell's tuple of integers to a cell label.
The result is a string containing the cell's coordinates in label form.
:param row: The row of the cell to be converted.
Rows start at index 1.
:param col: The column of the cell to be converted.
Columns start at index 1.
Example:
>>> wks.get_addr_int(1, 1)
A1
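        >>> wks.get_addr_int(3, 27)
        AA3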
"""
row = int(row)
col = int(col)
if row < 1 or col < 1:
raise IncorrectCellLabel('(%s, %s)' % (row, col))
div = col
column_label = ''
while div:
(div, mod) = divmod(div, 26)
if mod == 0:
mod = 26
div -= 1
column_label = chr(mod + self._MAGIC_NUMBER) + column_label
label = '%s%s' % (column_label, row)
return label
def acell(self, label):
"""Returns an instance of a :class:`Cell`.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.acell('A1') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.cell(*(self.get_int_addr(label)))
def cell(self, row, col):
"""Returns an instance of a :class:`Cell` positioned in `row`
and `col` column.
:param row: Integer row number.
:param col: Integer column number.
Example:
>>> wks.cell(1, 1)
<Cell R1C1 "I'm cell A1">
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
return Cell(self, feed)
def range(self, alphanum):
"""Returns a list of :class:`Cell` objects from specified range.
:param alphanum: A string with range value in common format,
e.g. 'A1:A5'.
"""
feed = self.client.get_cells_feed(self, params={'range': alphanum,
'return-empty': 'true'})
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
def get_all_values(self):
"""Returns a list of lists containing all cells' values as strings."""
cells = self._fetch_cells()
# defaultdicts fill in gaps for empty rows/cells not returned by gdocs
rows = defaultdict(lambda: defaultdict(str))
for cell in cells:
row = rows.setdefault(int(cell.row), defaultdict(str))
row[cell.col] = cell.value
# we return a whole rectangular region worth of cells, including
# empties
all_row_keys = chain.from_iterable(row.keys() for row in rows.values())
rect_cols = range(1, max(all_row_keys) + 1)
rect_rows = range(1, max(rows.keys()) + 1)
return [[rows[i][j] for j in rect_cols] for i in rect_rows]
def get_all_records(self, empty2zero=False):
"""Returns a list of dictionaries, all of them having:
- the contents of the spreadsheet's first row of cells as keys,
And each of these dictionaries holding
- the contents of subsequent rows of cells as values.
Cell values are numericised (strings that can be read as ints
or floats are converted).
:param empty2zero: determines whether empty cells are converted to zeros."""
data = self.get_all_values()
keys = data[0]
values = [numericise_all(row, empty2zero) for row in data[1:]]
return [dict(zip(keys, row)) for row in values]
def _list_values(self, index, cell_tuple, position):
cells_list = self._fetch_cells()
cells = dict(map(cell_tuple, filter(position, cells_list)))
try:
last_index = max(cells.keys())
except ValueError:
return []
vals = []
for i in range(1, last_index + 1):
c = cells.get(i)
vals.append(c.value if c else None)
return vals
def row_values(self, row):
"""Returns a list of all values in a `row`.
Empty cells in this list will be rendered as :const:`None`.
"""
return self._list_values(row,
lambda cell: (cell.col, cell),
lambda cell: cell.row == row)
def col_values(self, col):
"""Returns a list of all values in column `col`.
Empty cells in this list will be rendered as :const:`None`.
"""
return self._list_values(col,
lambda cell: (cell.row, cell),
lambda cell: cell.col == col)
def update_acell(self, label, val):
"""Sets the new value to a cell.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
:param val: New value.
Example:
>>> wks.update_acell('A1', '42') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.update_cell(*(self.get_int_addr(label)), val=val)
def update_cell(self, row, col, val):
"""Sets the new value to a cell.
:param row: Row number.
:param col: Column number.
:param val: New value.
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
cell_elem = feed.find(_ns1('cell'))
cell_elem.set('inputValue', unicode(val))
uri = self._get_link('edit', feed).get('href')
self.client.put_feed(uri, ElementTree.tostring(feed))
def _create_update_feed(self, cell_list):
feed = Element('feed', {'xmlns': ATOM_NS,
'xmlns:batch': BATCH_NS,
'xmlns:gs': SPREADSHEET_NS})
id_elem = SubElement(feed, 'id')
id_elem.text = construct_url('cells', self)
for cell in cell_list:
entry = SubElement(feed, 'entry')
SubElement(entry, 'batch:id').text = cell.element.find(
_ns('title')).text
SubElement(entry, 'batch:operation', {'type': 'update'})
SubElement(entry, 'id').text = cell.element.find(_ns('id')).text
edit_link = finditem(lambda x: x.get('rel') == 'edit',
cell.element.findall(_ns('link')))
SubElement(entry, 'link', {'rel': 'edit',
'type': edit_link.get('type'),
'href': edit_link.get('href')})
SubElement(entry, 'gs:cell', {'row': str(cell.row),
'col': str(cell.col),
'inputValue': unicode(cell.value)})
return feed
def update_cells(self, cell_list):
"""Updates cells in batch.
        :param cell_list: List of :class:`Cell` objects to update.
"""
feed = self._create_update_feed(cell_list)
self.client.post_cells(self, ElementTree.tostring(feed))
def resize(self, rows=None, cols=None):
"""Resizes the worksheet.
        :param rows: New number of rows.
        :param cols: New number of columns.
"""
if rows is None and cols is None:
raise TypeError("Either 'rows' or 'cols' should be specified.")
self_uri = self._get_link('self', self._element).get('href')
feed = self.client.get_feed(self_uri)
uri = self._get_link('edit', feed).get('href')
if rows:
elem = feed.find(_ns1('rowCount'))
elem.text = str(rows)
if cols:
elem = feed.find(_ns1('colCount'))
elem.text = str(cols)
# Send request and store result
self._element = self.client.put_feed(uri, ElementTree.tostring(feed))
def add_rows(self, rows):
"""Adds rows to worksheet.
:param rows: Rows number to add.
"""
self.resize(rows=self.row_count + rows)
def add_cols(self, cols):
"""Adds colums to worksheet.
:param cols: Columns number to add.
"""
self.resize(cols=self.col_count + cols)
def append_row(self, values):
""""Adds a row to the worksheet and populates it with values.
Widens the worksheet if there are more values than columns.
:param values: List of values for the new row.
"""
self.add_rows(1)
new_row = self.row_count
data_width = len(values)
if self.col_count < data_width:
self.resize(cols=data_width)
cell_list = []
for i, value in enumerate(values, start=1):
cell = self.cell(new_row, i)
cell.value = value
cell_list.append(cell)
self.update_cells(cell_list)
def _finder(self, func, query):
cells = self._fetch_cells()
if isinstance(query, basestring):
match = lambda x: x.value == query
else:
match = lambda x: query.search(x.value)
return func(match, cells)
def find(self, query):
"""Finds first cell matching query.
:param query: A text string or compiled regular expression.
"""
try:
return self._finder(finditem, query)
except StopIteration:
raise CellNotFound(query)
def findall(self, query):
"""Finds all cells matching query.
:param query: A text string or compiled regular expression.
"""
return self._finder(filter, query)
class Cell(object):
"""An instance of this class represents a single cell
in a :class:`worksheet <Worksheet>`.
"""
def __init__(self, worksheet, element):
self.element = element
cell_elem = element.find(_ns1('cell'))
self._row = int(cell_elem.get('row'))
self._col = int(cell_elem.get('col'))
self.input_value = cell_elem.get('inputValue')
#: Value of the cell.
self.value = cell_elem.text or ''
@property
def row(self):
"""Row number of the cell."""
return self._row
@property
def col(self):
"""Column number of the cell."""
return self._col
def __repr__(self):
return '<%s R%sC%s %s>' % (self.__class__.__name__,
self.row,
self.col,
repr(self.value))
|
|
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; -*-
"""Module providing class for Wang sense disambiguation.
Attributes:
WangExplicitSenser (class):
class that predict senses of explicit relations
"""
##################################################################
# Imports
from __future__ import absolute_import, print_function, unicode_literals
from dsenser.constants import ARG1, ARG2, CONNECTIVE, DOC_ID, \
TOK_ID, TOK_LIST, SENTENCES, WORDS, POS, TOK_IDX, SNT_ID, \
PARSE_TREE
from dsenser.resources import CONNTOK2CONN, CONNTOKS, conn2str
from dsenser.wang.wangbase import WangBaseSenser
from dsenser.utils import timeit
from collections import defaultdict
from nltk import Tree
import re
import sys
##################################################################
# Variables and Constants
ENCODING = "utf-8"
PREV_NONE = "prev1_NONE"
SPACE_RE = re.compile(r"\s+")
MULTISPACE_RE = re.compile(r"\s\s+")
EQ_RE = re.compile(r"=+")
LEFT = 1
RIGHT = 2
DFLT_PRNT = "SBAR"
AS = "as"
WHEN = "when"
NEGATION = ("not", "n't")
MODALITY = {"can": 0, "may": 1, "must": 2, "need": 3, "shall": 4,
"will": 5, "could": 0, "would": 5, "might": 1,
"should": 4, "'ll": 4, "wo": 5, "sha": 4, "ca": 0,
"have to": 6, "had to": 6, "'d to": 6, "'ve to": 6
}
##################################################################
# Class
class WangExplicitSenser(WangBaseSenser):
"""Class for disambiguating explicit connectives.
Attributes:
n_y (int): number of distinct classes
a_grid_search (bool): use grid search for estimating hyper-parameters
"""
def __init__(self, a_clf=None, a_grid_search=False):
"""Class constructor.
Args:
a_clf (classifier or None):
classifier to use or None for default
a_grid_search (bool): use grid search for estimating hyper-parameters
"""
super(WangExplicitSenser, self).__init__(a_clf,
a_grid_search)
self.n_y = -1
self.ctype = "explicit"
@timeit("Training explicit Wang classifier...")
def train(self, *args, **kwargs):
super(WangExplicitSenser, self).train(*args, **kwargs)
def _extract_features(self, a_rel, a_parses):
"""Extract classification features for a given relation.
Args:
a_rel (dict):
discourse relation to extract features for
a_parses (dict):
parsed sentences
Returns:
            dict: mapping from feature names to feature values
"""
feats = {}
doc_id = a_rel[DOC_ID]
snt_id = a_rel[CONNECTIVE][TOK_LIST][-1][SNT_ID]
###########################
# connective token features
conn_tok = self._get_conn_txt(doc_id, a_rel, a_parses)
conn_ltok = conn_tok.lower()
assert conn_tok, \
"Connective string cannot be empty for explicit classifier."
conn_str = "ConStr-" + conn_tok
feats[conn_str] = 1
conn_lstr = conn_str.lower()
feats[conn_lstr] = 1
# obtain POS of the connective
conn_pos = "conpos-" + self._get_conn_pos(doc_id, a_rel, a_parses)
feats[conn_pos] = 1
# obtain token preceding the connective
prev_tok = "prevtok-" + self._get_conn_prev(doc_id, a_rel, a_parses)
feats["Tok1Tok2-{:s}|{:s}".format(conn_str, prev_tok)] = 1
####################
# syntactic features
tree_str = a_parses[doc_id][SENTENCES][snt_id][PARSE_TREE]
parse_tree = Tree.fromstring(tree_str)
if not parse_tree.leaves():
print("Invalid parse tree for sentence {:d}".format(snt_id),
file=sys.stderr)
return {self._escape_feat(k): v for k, v in feats.iteritems()}
conn_t_ids = [t[-1] for t in a_rel[CONNECTIVE][TOK_LIST]]
scat = "SyntCat-" + self._get_cat(conn_t_ids, parse_tree)
feats[scat] = 1
prnt_cat = "PrntCat-" + self._get_prnt_cat(conn_t_ids, parse_tree)
feats[prnt_cat] = 1
left_sib = "LeftSib-" + self._get_sib(conn_t_ids, parse_tree, LEFT)
feats[left_sib] = 1
right_sib = "RightSib-" + self._get_sib(conn_t_ids, parse_tree, RIGHT)
feats[right_sib] = 1
################
# joint features
feats[conn_ltok + '|' + scat] = 1
feats[conn_ltok + '|' + prnt_cat] = 1
feats[conn_ltok + '|' + left_sib] = 1
feats[conn_ltok + '|' + right_sib] = 1
feats[scat + '|' + prnt_cat] = 1
feats[scat + '|' + left_sib] = 1
feats[scat + '|' + right_sib] = 1
feats[prnt_cat + '|' + left_sib] = 1
feats[prnt_cat + '|' + right_sib] = 1
feats[left_sib + '|' + right_sib] = 1
#################
# Wang's features
ctx_nodes = "CtxNodes-" + '-'.join(self._get_ctx_nodes(conn_t_ids,
parse_tree))
feats[ctx_nodes] = 1
if conn_ltok == AS or conn_ltok == WHEN:
toks = a_parses[doc_id][SENTENCES][snt_id][WORDS]
prev_conn, prev_pos = self._get_prev_conn(conn_t_ids[0],
parse_tree,
toks)
if prev_conn:
feats[conn_ltok + '-' + conn2str(prev_conn[0])] = 1
feats[conn_ltok + "PoS-" + ','.join(prev_pos[0])] = 1
if conn_ltok != AS:
feats["NotAs"] = 1
if conn_ltok != WHEN:
feats["NotWhen"] = 1
#########################
# Own features (TS)
toks_pos1 = self._get_toks_pos(a_parses[doc_id][SENTENCES],
a_rel, ARG1)
toks_pos2 = self._get_toks_pos(a_parses[doc_id][SENTENCES],
a_rel, ARG2)
# modality (copied from implicit)
self._get_modality(feats, toks_pos1, toks_pos2)
# negation
self._get_negation(feats, toks_pos1, toks_pos2)
# obtain token (and pos) following the connective
(succ_tok, succ_pos) = self._get_conn_succ(doc_id, a_rel, a_parses)
feats["succtok-" + succ_tok] = 1
feats["succpos-" + succ_pos] = 1
##########################
# Normalize feature names
return {self._escape_feat(k): v for k, v in feats.iteritems()}
def _get_conn_txt(self, a_doc_id, a_rel, a_parses):
"""Obtain raw text of the connective.
Args:
a_doc_id (str):
                id of the document containing the connective
a_rel (dict):
discourse relation to extract features for
a_parses (dict):
parsed data
Returns:
str: connective's text
"""
return ' '.join([
a_parses[a_doc_id][SENTENCES][snt_id][WORDS][tok_id][TOK_IDX]
for _, _, _, snt_id, tok_id in a_rel[CONNECTIVE][TOK_LIST]])
def _get_conn_pos(self, a_doc_id, a_rel, a_parses):
"""Obtain part-of-speech tags of the connective.
Args:
a_doc_id (str):
                id of the document containing the connective
a_rel (dict):
discourse relation to extract features for
a_parses (dict):
parsed data
Returns:
str: connective's part-of-speech tags
"""
return '_'.join([
a_parses[a_doc_id][SENTENCES][snt_id][WORDS][tok_id][1][POS]
for _, _, _, snt_id, tok_id in a_rel[CONNECTIVE][TOK_LIST]])
def _get_conn_prev(self, a_doc_id, a_rel, a_parses):
"""Obtain token preceding the connective.
Args:
a_doc_id (str):
id of the document containing the connective
a_rel (dict):
discourse relation to extract features for
a_parses (dict):
parsed data
Returns:
str: connective's text
"""
_, _, _, snt_id, tok_id = a_rel[CONNECTIVE][TOK_LIST][0]
if tok_id > 0:
tok_id -= 1
elif snt_id > 0:
snt_id -= 1
tok_id = -1
else:
return PREV_NONE
return a_parses[a_doc_id][SENTENCES][snt_id][WORDS][tok_id][TOK_IDX]
def _get_conn_succ(self, a_doc_id, a_rel, a_parses):
"""Obtain the token and POS following the connective
Args:
a_doc_id (str):
                id of the document containing the connective
a_rel (dict):
discourse relation to extract features for
a_parses (dict):
parsed data
Returns:
tuple(str, pos): next token and pos
"""
_, _, _, snt_id, tok_id = a_rel[CONNECTIVE][TOK_LIST][0]
if len(a_parses[a_doc_id][SENTENCES][snt_id][WORDS]) > \
(tok_id + 1):
tok_id += 1
elif len(a_parses[a_doc_id][SENTENCES]) > (snt_id + 1):
snt_id += 1
tok_id = 0
else:
return ("succ_none", "succ_tok_none")
return (a_parses[a_doc_id][SENTENCES][snt_id][WORDS][tok_id][TOK_IDX],
a_parses[a_doc_id][SENTENCES][snt_id][WORDS][tok_id][1][POS])
def _get_cat(self, a_conn_toks, a_tree):
"""Obtain syntactic category of a connective.
Args:
a_conn_toks (list):
list of connective's tokens
a_tree (dict):
syntactic tree
Returns:
str: syntactic category of the connective
"""
inode_id = self._get_path(a_conn_toks, a_tree)
return a_tree[inode_id].label()
def _get_prnt_cat(self, a_conn_toks, a_tree):
"""Obtain syntactic category of connective's parent.
Args:
a_conn_toks (list):
list of connective's tokens
a_tree (dict):
syntactic tree
Returns:
str: syntactic category of the connective
"""
inode_id = self._get_path(a_conn_toks, a_tree)
if len(a_conn_toks) > 1 and len(inode_id) > 1:
inode_id = inode_id[:-1]
if len(inode_id) > 1:
return a_tree[inode_id[:-1]].label()
else:
return DFLT_PRNT
def _get_sib(self, a_conn_toks, a_tree, a_side):
"""Find common ancestor of two nodes
Args:
a_conn_toks (list):
list of connective's tokens
a_tree (dict):
syntactic tree
a_side (int):
side of the sibling
Returns:
str: label of the sibling (or empty string)
"""
ret = "NONE"
inode_id = self._get_path(a_conn_toks, a_tree)
sib_path = [i for i in inode_id]
if a_side == LEFT:
if inode_id[-1] == 0:
return ret
sib_path[-1] -= 1
else:
if inode_id[-1] + 1 == len(a_tree[inode_id[:-1]]):
return ret
sib_path[-1] += 1
return a_tree[sib_path].label()
def _get_ctx_nodes(self, a_conn_toks, a_tree):
"""Find syntactic context nodes of the connective.
Args:
a_conn_toks (list):
list of connective's tokens
a_tree (dict):
syntactic tree
Returns:
list: list of context node labels
"""
ret = []
inode_id = self._get_path(a_conn_toks, a_tree)
if len(inode_id) <= 1:
return [DFLT_PRNT]
prnt_node = a_tree[inode_id[:-1]]
ret.append(prnt_node.label())
for inode in prnt_node:
ret.append(inode.label())
return ret
def _get_prev_conn(self, a_tok_id, a_tree, a_toks):
"""Obtain of the connective.
Args:
a_tok_id (int):
index of the first token of the connective
a_tree (dict):
syntactic tree
a_toks (list):
list of sentence tokens
Returns:
tuple:
list of previous connective tokens and list of their PoS tags
"""
ret_conn = []
ret_pos = []
# obtain tokens of the sentence in question
toks = [(i, t.lower()) for i, t in
enumerate(a_tree.leaves()[:a_tok_id])]
tokset = set(t[-1] for t in toks)
sent_len = len(toks)
# find matches of the first connective tokens
matches = tokset & CONNTOKS
if not matches:
return (ret_conn, ret_pos)
# generate mapping from sentence tokens to their indices
snt_tok2pos = defaultdict(list)
for i, t in toks:
snt_tok2pos[t].append(i)
matches = [(ipos, imatch) for imatch in matches
for ipos in snt_tok2pos[imatch]]
matches.sort(key=lambda el: el[0])
# generate mapping from connective parts to sentence indices
found = False
pos_tags = None
start_tok = None
start = prev_start_pos = start_pos = -1
# check each matched first token
for istart, imatch in matches:
# iterate over each connective which starts with that token
for i, iconn in CONNTOK2CONN[imatch]:
if i != 0:
continue
found = True
pos_tags = []
start = -1
prev_start_pos = istart
# iterate over each separate part of that connective
for jpart in iconn:
start_tok = jpart[0]
if start_tok not in snt_tok2pos:
found = False
break
# iterate over each token of that part
for start_pos in snt_tok2pos[start_tok]:
# make sure the next connective part starts after the
# previous one
if start_pos < prev_start_pos:
continue
if len(jpart) > sent_len - start_pos:
found = False
break
found = True
for ktok, snt_tok in zip(jpart, toks[start_pos:]):
if ktok != snt_tok[-1]:
found = False
break
if found:
pos_tags.extend(a_toks[t_id][1][POS] for t_id in
xrange(start_pos,
start_pos + len(jpart)))
break
if found:
if start == -1:
start = start_pos
prev_start_pos = start_pos
else:
break
if found:
ret_conn.append((start, iconn))
ret_pos.append((start, pos_tags))
# sort connectives and their pos tags according to the starting
# position
ret_conn.sort(key=lambda el: el[0])
ret_pos.sort(key=lambda el: el[0])
# return only connectives and their pos tags
return ([el[-1] for el in ret_conn], [el[-1] for el in ret_pos])
def _get_path(self, a_toks, a_tree):
"""Obtain path to the syntactic node covering all tokens.
Args:
a_toks (list):
list of token(s)
a_tree (dict):
syntactic tree
Returns:
tuple: path to the node covering all tokens
"""
if len(a_toks) > 1:
istart, iend = min(a_toks), max(a_toks) + 1
return a_tree.treeposition_spanning_leaves(istart, iend)
else:
path = a_tree.leaf_treeposition(a_toks[0])
if len(path) > 2:
path = path[:-2]
elif len(path) > 1:
path = path[:-1]
return path
def _get_negation(self, a_feats, a_toks1, a_toks2):
"""Estimate polarity values of the given relation.
Args:
a_feats (dict):
target feature dictionary
a_toks1 (list(str)):
list of tokens from the 1-st argument
a_toks2 (list(str)):
list of tokens from the 2-nd argument
Returns:
void: updates `a_feats` dictionary in place
"""
neg1 = self._get_arg_negation(a_toks1)
neg2 = self._get_arg_negation(a_toks2)
#add negation features
a_feats["Neg1-" + neg1] = 1
a_feats["Neg2-" + neg2] = 1
a_feats["JointNeg-" + neg1 + "|" + neg2] = 1
def _get_arg_negation(self, a_toks):
"""Estimate polarity of the given relation argument.
Args:
            a_toks (list): argument's tokens with their PoS tags
        Returns:
            str: "neg" if the argument contains a negation marker, "pos" otherwise
"""
ret = "pos"
bigram = None
for i, itok in enumerate(a_toks):
itok = itok[0]
if itok in NEGATION:
ret = "neg"
return ret
def _get_modality(self, a_feats, a_toks1, a_toks2):
"""Estimate modality of the given relation.
Args:
a_feats (dict):
target feature dictionary
a_toks1 (list):
list of tokens from the 1-st argument
a_toks2 (list):
list of tokens from the 2-nd argument
Returns:
void:
Note:
updates ``a_feats`` dictionary in place
"""
mod1 = self._get_arg_modality(a_toks1)
mod2 = self._get_arg_modality(a_toks2)
joint_mod = [i * j for i in mod1 for j in mod2]
# add modality features
a_feats["Mod1-" + ''.join(str(i) for i in mod1)] = 1.
a_feats["Mod2-" + ''.join(str(i) for i in mod2)] = 1.
a_feats["JointMod-" + ''.join(str(i) for i in joint_mod)] = 1.
def _get_arg_modality(self, a_toks):
"""Estimate modality of the given relation argument.
Args:
a_toks (list): argument's tokens
Returns:
            list(int): 7-element indicator vector of modal markers found in the argument
"""
ret = [0] * 7
bigram = None
max_i = len(a_toks) - 1
for i, itok in enumerate(a_toks):
itok = itok[0]
if itok in MODALITY:
ret[MODALITY[itok]] = 1
if i < max_i:
bigram = itok + ' ' + a_toks[i + 1][0]
if bigram in MODALITY:
ret[MODALITY[bigram]] = 1
return ret
def _get_toks_pos(self, a_parses, a_rel, a_arg):
"""Method for getting raw tokens with their parts of speech.
Args:
a_parses (dict):
parsed sentences
a_rel (dict):
discourse relation whose tokens should be obtained
a_arg (str):
            relation argument to obtain tokens for
Returns:
list:
list of tokens and their parts of speech
"""
ret = []
snt = wrd = None
for s_id, w_ids in \
self._get_snt2tok(a_rel[a_arg][TOK_LIST]).iteritems():
snt = a_parses[s_id][WORDS]
for w_id in w_ids:
wrd = snt[w_id]
ret.append((wrd[TOK_IDX].lower(), wrd[1][POS]))
return ret
def _get_snt2tok(self, a_tok_list):
"""Generate mapping from sentence indices to token lists.
Args:
a_tok_list (list):
list of sentence and token indices pertaining to the argument
Returns:
collections.defaultdict:
mapping from sentence indices to token lists
"""
snt2tok_pos = defaultdict(set)
for el in a_tok_list:
snt_id = el[SNT_ID]
snt2tok_pos[snt_id].add(el[TOK_ID])
return snt2tok_pos
def _escape_feat(self, a_feat):
        """Replace characters that might confuse the dict vectorizer.
Args:
a_feat (str): feature to be escaped
Return:
str: escaped feature
"""
a_feat = MULTISPACE_RE.sub(' ', a_feat).strip()
return EQ_RE.sub('_', a_feat)
|
|
#!/usr/bin/python
import datetime
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: ceph_volume
short_description: Create ceph OSDs with ceph-volume
description:
- Using the ceph-volume utility available in Ceph this module
can be used to create ceph OSDs that are backed by logical volumes.
- Only available in ceph versions luminous or greater.
options:
cluster:
description:
- The ceph cluster name.
required: false
default: ceph
subcommand:
description:
- The ceph-volume subcommand to use.
required: false
default: lvm
choices: ['lvm']
objectstore:
description:
- The objectstore of the OSD, either filestore or bluestore
required: true
choices: ['bluestore', 'filestore']
data:
description:
- The logical volume name or device to use for the OSD data.
required: true
data_vg:
description:
- If data is a lv, this must be the name of the volume group it belongs to.
required: false
journal:
description:
- The logical volume name or partition to use as a filestore journal.
- Only applicable if objectstore is 'filestore'.
required: false
journal_vg:
description:
- If journal is a lv, this must be the name of the volume group it belongs to.
- Only applicable if objectstore is 'filestore'.
required: false
db:
description:
- A partition or logical volume name to use for block.db.
- Only applicable if objectstore is 'bluestore'.
required: false
db_vg:
description:
- If db is a lv, this must be the name of the volume group it belongs to.
- Only applicable if objectstore is 'bluestore'.
required: false
wal:
description:
- A partition or logical volume name to use for block.wal.
- Only applicable if objectstore is 'bluestore'.
required: false
wal_vg:
description:
- If wal is a lv, this must be the name of the volume group it belongs to.
- Only applicable if objectstore is 'bluestore'.
required: false
crush_device_class:
description:
- Will set the crush device class for the OSD.
required: false
dmcrypt:
description:
- If set to True the OSD will be encrypted with dmcrypt.
required: false
author:
- Andrew Schoen (@andrewschoen)
'''
EXAMPLES = '''
- name: set up a filestore osd with an lv data and a journal partition
ceph_volume:
objectstore: filestore
data: data-lv
data_vg: data-vg
journal: /dev/sdc1
- name: set up a bluestore osd with a raw device for data
ceph_volume:
objectstore: bluestore
data: /dev/sdc
- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db
ceph_volume:
objectstore: bluestore
data: data-lv
data_vg: data-vg
db: /dev/sdc1
wal: /dev/sdc2
'''
from ansible.module_utils.basic import AnsibleModule
def get_data(data, data_vg):
if data_vg:
data = "{0}/{1}".format(data_vg, data)
return data
def get_journal(journal, journal_vg):
if journal_vg:
journal = "{0}/{1}".format(journal_vg, journal)
return journal
def get_db(db, db_vg):
if db_vg:
db = "{0}/{1}".format(db_vg, db)
return db
def get_wal(wal, wal_vg):
if wal_vg:
wal = "{0}/{1}".format(wal_vg, wal)
return wal
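# Illustration: get_data("data-lv", "data-vg") returns "data-vg/data-lv", while
# get_data("/dev/sdb", None) passes the raw device path through unchanged; the
# journal/db/wal helpers above behave the same way.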
def run_module():
module_args = dict(
cluster=dict(type='str', required=False, default='ceph'),
subcommand=dict(type='str', required=False, default='lvm'),
objectstore=dict(type='str', required=True),
data=dict(type='str', required=True),
data_vg=dict(type='str', required=False),
journal=dict(type='str', required=False),
journal_vg=dict(type='str', required=False),
db=dict(type='str', required=False),
db_vg=dict(type='str', required=False),
wal=dict(type='str', required=False),
wal_vg=dict(type='str', required=False),
crush_device_class=dict(type='str', required=False),
dmcrypt=dict(type='bool', required=False, default=False),
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
cluster = module.params['cluster']
subcommand = module.params['subcommand']
objectstore = module.params['objectstore']
data = module.params['data']
data_vg = module.params.get('data_vg', None)
journal = module.params.get('journal', None)
journal_vg = module.params.get('journal_vg', None)
db = module.params.get('db', None)
db_vg = module.params.get('db_vg', None)
wal = module.params.get('wal', None)
wal_vg = module.params.get('wal_vg', None)
crush_device_class = module.params.get('crush_device_class', None)
dmcrypt = module.params['dmcrypt']
cmd = [
'ceph-volume',
'--cluster',
cluster,
subcommand,
'create',
'--%s' % objectstore,
'--data',
]
data = get_data(data, data_vg)
cmd.append(data)
if journal:
journal = get_journal(journal, journal_vg)
cmd.extend(["--journal", journal])
if db:
db = get_db(db, db_vg)
cmd.extend(["--block.db", db])
if wal:
wal = get_wal(wal, wal_vg)
cmd.extend(["--block.wal", wal])
if crush_device_class:
cmd.extend(["--crush-device-class", crush_device_class])
if dmcrypt:
cmd.append("--dmcrypt")
result = dict(
changed=False,
cmd=cmd,
stdout='',
stderr='',
rc='',
start='',
end='',
delta='',
)
if module.check_mode:
return result
# check to see if osd already exists
# FIXME: this does not work when data is a raw device
rc, out, err = module.run_command(["ceph-volume", "lvm", "list", data], encoding=None)
if rc == 0:
result["stdout"] = "skipped, since {0} is already used for an osd".format(data)
result['rc'] = 0
module.exit_json(**result)
startd = datetime.datetime.now()
rc, out, err = module.run_command(cmd, encoding=None)
endd = datetime.datetime.now()
delta = endd - startd
result = dict(
cmd=cmd,
stdout=out.rstrip(b"\r\n"),
stderr=err.rstrip(b"\r\n"),
rc=rc,
start=str(startd),
end=str(endd),
delta=str(delta),
changed=True,
)
if rc != 0:
module.fail_json(msg='non-zero return code', **result)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
|
|
import time
import tqdm
import logging
import itertools
import functools
import collections
import networkx as nx
from indra.util import fast_deepcopy
from indra.statements import *
from indra.statements import stmt_type as indra_stmt_type
from .refinement import *
logger = logging.getLogger(__name__)
class Preassembler(object):
"""De-duplicates statements and arranges them in a specificity hierarchy.
Parameters
----------
ontology : :py:class:`indra.ontology.IndraOntology`
An INDRA Ontology object.
stmts : list of :py:class:`indra.statements.Statement` or None
A set of statements to perform pre-assembly on. If None, statements
should be added using the :py:meth:`add_statements` method.
matches_fun : Optional[function]
A function which takes a Statement object as argument and
returns a string key that is used for duplicate recognition. If
supplied, it overrides the use of the built-in matches_key method of
each Statement being assembled.
refinement_fun : Optional[function]
A function which takes two Statement objects and an ontology
as an argument and returns True or False. If supplied, it overrides
the built-in refinement_of method of each Statement being assembled.
Attributes
----------
stmts : list of :py:class:`indra.statements.Statement`
Starting set of statements for preassembly.
unique_stmts : list of :py:class:`indra.statements.Statement`
Statements resulting from combining duplicates.
related_stmts : list of :py:class:`indra.statements.Statement`
Top-level statements after building the refinement hierarchy.
ontology : :py:class:`indra.preassembler.ontology_graph.IndraOntology`
An INDRA Ontology object.
"""
def __init__(self, ontology, stmts=None, matches_fun=None,
refinement_fun=None):
self.ontology = ontology
if stmts:
logger.debug("Deepcopying stmts in __init__")
self.stmts = fast_deepcopy(stmts)
else:
self.stmts = []
self.unique_stmts = None
self.related_stmts = None
self.matches_fun = matches_fun if matches_fun else \
default_matches_fun
self.refinement_fun = refinement_fun if refinement_fun else \
default_refinement_fun
self._comparison_counter = 0
def add_statements(self, stmts):
"""Add to the current list of statements.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Statements to add to the current list.
"""
self.stmts += fast_deepcopy(stmts)
def combine_duplicates(self):
"""Combine duplicates among `stmts` and save result in `unique_stmts`.
A wrapper around the method :py:meth:`combine_duplicate_stmts`.
"""
if self.unique_stmts is None:
self.unique_stmts = self.combine_duplicate_stmts(self.stmts)
return self.unique_stmts
def _get_stmt_matching_groups(self, stmts):
"""Use the matches_fun method to get sets of matching statements."""
# Remove exact duplicates using a set() call, then make copies:
logger.debug('%d statements before removing object duplicates.' %
len(stmts))
st = list(set(stmts))
logger.debug('%d statements after removing object duplicates.' %
len(st))
# Group statements according to whether they are matches (differing
# only in their evidence).
# Sort the statements in place by matches_key()
st.sort(key=self.matches_fun)
return itertools.groupby(st, key=self.matches_fun)
def combine_duplicate_stmts(self, stmts):
"""Combine evidence from duplicate Statements.
Statements are deemed to be duplicates if they have the same key
returned by the `matches_key()` method of the Statement class. This
generally means that statements must be identical in terms of their
arguments and can differ only in their associated `Evidence` objects.
This function keeps the first instance of each set of duplicate
statements and merges the lists of Evidence from all of the other
statements.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Set of statements to de-duplicate.
Returns
-------
list of :py:class:`indra.statements.Statement`
Unique statements with accumulated evidence across duplicates.
Examples
--------
De-duplicate and combine evidence for two statements differing only
in their evidence lists:
>>> from indra.ontology.bio import bio_ontology
>>> map2k1 = Agent('MAP2K1')
>>> mapk1 = Agent('MAPK1')
>>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 1')])
>>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 2')])
>>> pa = Preassembler(bio_ontology)
>>> uniq_stmts = pa.combine_duplicate_stmts([stmt1, stmt2])
>>> uniq_stmts
[Phosphorylation(MAP2K1(), MAPK1(), T, 185)]
>>> sorted([e.text for e in uniq_stmts[0].evidence])
['evidence 1', 'evidence 2']
"""
# Helper function to get a list of evidence matches keys
def _ev_keys(sts):
ev_keys = []
for stmt in sts:
for ev in stmt.evidence:
ev_keys.append(ev.matches_key())
return ev_keys
# Iterate over groups of duplicate statements
unique_stmts = []
for _, duplicates in self._get_stmt_matching_groups(stmts):
ev_keys = set()
# Get the first statement and add the evidence of all subsequent
# Statements to it
duplicates = list(duplicates)
start_ev_keys = _ev_keys(duplicates)
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
new_stmt = stmt.make_generic_copy()
if len(duplicates) == 1:
new_stmt.uuid = stmt.uuid
raw_text = [None if ag is None else ag.db_refs.get('TEXT')
for ag in stmt.agent_list(deep_sorted=True)]
raw_grounding = [None if ag is None else ag.db_refs
for ag in stmt.agent_list(deep_sorted=True)]
for ev in stmt.evidence:
ev_key = ev.matches_key() + str(raw_text) + \
str(raw_grounding)
if ev_key not in ev_keys:
# If there is already an 'agents' annotation, we just add
# the raw_text/raw_grounding keys to it, otherwise we
# create a new 'agents' annotation
if 'agents' in ev.annotations:
ev.annotations['agents']['raw_text'] = raw_text
ev.annotations['agents']['raw_grounding'] = \
raw_grounding
else:
ev.annotations['agents'] = \
{'raw_text': raw_text,
'raw_grounding': raw_grounding}
if 'prior_uuids' not in ev.annotations:
ev.annotations['prior_uuids'] = []
ev.annotations['prior_uuids'].append(stmt.uuid)
new_stmt.evidence.append(ev)
ev_keys.add(ev_key)
end_ev_keys = _ev_keys([new_stmt])
if len(end_ev_keys) != len(start_ev_keys):
logger.debug('%d redundant evidences eliminated.' %
(len(start_ev_keys) - len(end_ev_keys)))
# new_stmt should always be a Statement at this point
assert isinstance(new_stmt, Statement)
unique_stmts.append(new_stmt)
# At this point, we should do a hash refresh so that the statements
# returned don't have stale hashes.
for stmt in unique_stmts:
for shallow in (True, False):
stmt.get_hash(shallow=shallow, refresh=True,
matches_fun=self.matches_fun)
return unique_stmts
# Note that the kwargs here are just there for backwards compatibility
# with old code that uses arguments related to multiprocessing.
def combine_related(self, return_toplevel=True, filters=None, **kwargs):
"""Connect related statements based on their refinement relationships.
This function takes as a starting point the unique statements (with
duplicates removed) and returns a modified flat list of statements
containing only those statements which do not represent a refinement of
other existing statements. In other words, the more general versions of
a given statement do not appear at the top level, but instead are
listed in the `supports` field of the top-level statements.
If :py:attr:`unique_stmts` has not been initialized with the
de-duplicated statements, :py:meth:`combine_duplicates` is called
internally.
After this function is called the attribute :py:attr:`related_stmts` is
set as a side-effect.
The procedure for combining statements in this way involves a series
of steps:
1. The statements are subjected to (built-in or user-supplied) filters
that group them based on potential refinement relationships. For
instance, the ontology-based filter positions each statement,
based on its agent arguments, with the ontology, and determines
potential refinements based on paths in the ontology graph.
2. Each statement is then compared with the set of statements it
can potentially refine, as determined by the pre-filters.
If the statement represents a refinement of
the other (as defined by the `refinement_of()` method implemented
for the Statement), then the more refined statement is added
to the `supports` field of the more general statement, and the
more general statement is added to the `supported_by` field of
the more refined statement.
3. A new flat list of statements is created that contains only those
statements that have no `supports` entries (statements containing
such entries are not eliminated, because they will be retrievable
from the `supported_by` fields of other statements). This list
is returned to the caller.
.. note:: Subfamily relationships must be consistent across arguments
For now, we require that merges can only occur if the *isa*
relationships are all in the *same direction for all the agents* in
a Statement. For example, the two statement groups: `RAF_family ->
MEK1` and `BRAF -> MEK_family` would not be merged, since BRAF
*isa* RAF_family, but MEK_family is not a MEK1. In the future this
restriction could be revisited.
Parameters
----------
return_toplevel : Optional[bool]
If True only the top level statements are returned.
If False, all statements are returned. Default: True
filters : Optional[list[:py:class:`indra.preassembler.refinement.RefinementFilter`]]
A list of RefinementFilter classes that implement filters on
possible statement refinements. For details on how to
construct such a filter, see the documentation of
:py:class:`indra.preassembler.refinement.RefinementFilter`.
If no user-supplied filters are provided, the default ontology-based
filter is applied. If a list of filters is provided here, the
:py:class:`indra.preassembler.refinement.OntologyRefinementFilter`
isn't appended by default, and should be added by the user, if
necessary. Default: None
Returns
-------
list of :py:class:`indra.statement.Statement`
The returned list contains Statements representing the more
concrete/refined versions of the Statements involving particular
entities. The attribute :py:attr:`related_stmts` is also set to
this list. However, if return_toplevel is False then all
statements are returned, irrespective of level of specificity.
In this case the relationships between statements can
be accessed via the supports/supported_by attributes.
Examples
--------
A more general statement with no information about a Phosphorylation
site is identified as supporting a more specific statement:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> combined_stmts = pa.combine_related() # doctest:+ELLIPSIS
>>> combined_stmts
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> combined_stmts[0].supported_by
[Phosphorylation(BRAF(), MAP2K1())]
>>> combined_stmts[0].supported_by[0].supports
[Phosphorylation(BRAF(), MAP2K1(), S)]
"""
if self.related_stmts is not None:
if return_toplevel:
return self.related_stmts
else:
assert self.unique_stmts is not None
return self.unique_stmts
# Call combine_duplicates, which lazily initializes self.unique_stmts
unique_stmts = self.combine_duplicates()
# Generate the index map, linking related statements.
idx_map = self._generate_id_maps(unique_stmts,
filters=filters)
# Now iterate over all indices and set supports/supported by
for ix1, ix2 in idx_map:
unique_stmts[ix1].supported_by.append(unique_stmts[ix2])
unique_stmts[ix2].supports.append(unique_stmts[ix1])
# Get the top level statements
self.related_stmts = [st for st in unique_stmts if not st.supports]
logger.debug('%d top level' % len(self.related_stmts))
if return_toplevel:
return self.related_stmts
else:
return unique_stmts
# Note that the kwargs here are just there for backwards compatibility
# with old code that uses arguments related to multiprocessing.
def _generate_id_maps(self, unique_stmts, split_idx=None,
filters=None, **kwargs):
"""Return pairs of statement indices representing refinement relations.
Parameters
----------
unique_stmts : list[indra.statements.Statement]
A list of de-duplicated INDRA Statements.
split_idx : Optional[int]
An index at which the flat list of unique statements should be split
and compared for refinements only across the two groups, not
within each group. By default, no splitting is done and all statements
are compared for refinements.
filters : Optional[list[:py:class:`indra.preassembler.refinement.RefinementFilter`]]
A list of RefinementFilter classes that implement filters on
possible statement refinements. For details on how to
construct such a filter, see the documentation of
:py:class:`indra.preassembler.refinement.RefinementFilter`.
If no user-supplied filters are provided, the default ontology-based
filter is applied. If a list of filters is provided here, the
:py:class:`indra.preassembler.refinement.OntologyRefinementFilter`
isn't appended by default, and should be added by the user, if
necessary. Default: None
Returns
-------
list[tuple]
A list of tuples where the first element of each tuple is
the linear index of a statement in the unique stmts list
which refines the statement whose index is the second
element of the tuple.
"""
ts = time.time()
# Make a list of Statement types
stmt_to_idx = {stmt.get_hash(matches_fun=self.matches_fun): idx
for idx, stmt in enumerate(unique_stmts)}
if len(unique_stmts) != len(stmt_to_idx):
raise ValueError('The unique statements used as an input for '
'finding refinements do not all have distinct '
'matches key hashes. This could be due to cached '
'hashes being outdated or hashes not having been '
'calculated according to a custom matches key '
'function used for refinement finding.')
# Statements keyed by their hashes
stmts_by_hash = {stmt.get_hash(matches_fun=self.matches_fun):
stmt for stmt in unique_stmts}
# Here we apply any additional filters to cut down the number of
# potential comparisons before actually making comparisons
if not filters:
filters = [OntologyRefinementFilter(ontology=self.ontology)]
# Here we handle split_idx to allow finding refinements between
# two distinct groups of statements (identified by an index at which we
# split the unique_statements list) rather than globally across
# all unique statements.
if split_idx:
split_groups = {sh: (idx <= split_idx)
for sh, idx in stmt_to_idx.items()}
sgf = SplitGroupFilter(split_groups=split_groups)
filters.append(sgf)
# We can now append the confirmation filter
confirm_filter = \
RefinementConfirmationFilter(ontology=self.ontology,
refinement_fun=self.refinement_fun)
filters.append(confirm_filter)
# Initialize all filters
for filt in filters:
filt.initialize(stmts_by_hash=stmts_by_hash)
# This is the core of refinement finding. Here we apply filter functions
# per statement, sequentially.
# Since the actual comparison which evaluates the refinement_fun on
# potentially related statements is the last filter, we don't need to
# do any further operations after this loop.
relations = {}
for stmt_hash, stmt in tqdm.tqdm(stmts_by_hash.items(),
desc='Finding refinement relations'):
relations[stmt_hash] = \
find_refinements_for_statement(stmt, filters)
te = time.time()
logger.info('Found all refinements in %.2fs' % (te-ts))
self._comparison_counter = confirm_filter.comparison_counter
logger.info('Total comparisons: %d' % self._comparison_counter)
idx_maps = []
for refiner, refineds in relations.items():
idx_maps += [(stmt_to_idx[refiner], stmt_to_idx[refined])
for refined in refineds]
return idx_maps
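# For example, if unique_stmts[2] refines unique_stmts[0], the returned list
# contains (2, 0); combine_related() then appends unique_stmts[0] to
# unique_stmts[2].supported_by and unique_stmts[2] to unique_stmts[0].supports.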
def find_contradicts(self):
"""Return pairs of contradicting Statements.
Returns
-------
contradicts : list(tuple(Statement, Statement))
A list of Statement pairs that are contradicting.
"""
# Make a dict of Statement by type
stmts_by_type = collections.defaultdict(list)
for stmt in self.stmts:
stmts_by_type[indra_stmt_type(stmt)].append(stmt)
stmts_by_type = dict(stmts_by_type)
# Handle Statements with polarity first
pos_stmts = AddModification.__subclasses__()
neg_stmts = [modclass_to_inverse[c] for c in pos_stmts]
pos_stmts += [Activation, IncreaseAmount]
neg_stmts += [Inhibition, DecreaseAmount]
contradicts = []
# Handle statements with polarity first
# TODO: we could probably do some optimization here
# to not have to check statements combinatorially
for pst, nst in zip(pos_stmts, neg_stmts):
poss = stmts_by_type.get(pst, [])
negs = stmts_by_type.get(nst, [])
for ps, ns in itertools.product(poss, negs):
if ps.contradicts(ns, self.ontology):
contradicts.append((ps, ns))
# Handle neutral Statements next
neu_stmts = [Influence, ActiveForm]
for stt in neu_stmts:
stmts = stmts_by_type.get(stt, [])
for st1, st2 in itertools.combinations(stmts, 2):
if st1.contradicts(st2, self.ontology):
contradicts.append((st1, st2))
return contradicts
def _normalize_relations(self, ns, rank_key, rel_fun, flip_polarity):
# Find related entries, sort them, and return the first one which is
# the one that will be normalized to
def _replace_grounding(ns, entry, rank_key, rel_fun):
rel_ents = rel_fun(ns, entry)
if rel_ents:
rel_ents = [(ns, e.split('#')[1] if '#' in e else e)
for ns, e in rel_ents]
sorted_entries = sorted([(ns, entry)] + rel_ents,
key=rank_key)
_, chosen = sorted_entries[0]
return chosen, chosen != entry
else:
return entry, False
# If no custom rank_key was provided we use the original value to
# sort by
if rank_key is None:
def polarity_rank_key(args):
ns, entry = args
pol = self.ontology.get_polarity(ns, entry)
# Here we flip polarities to rank positive polarity before
# negative
pol_rank = -1 if pol is None else -pol
return pol_rank, entry
rank_key = polarity_rank_key
# We now go agent by agent to normalize grounding
for stmt in self.stmts:
for agent_idx, agent in enumerate(stmt.agent_list()):
# If the relevant namespace is an entry
if agent is not None and ns in agent.db_refs:
grounding = agent.db_refs[ns]
# If we have a list, we iterate over it and normalize
# each entry separately
if isinstance(grounding, list):
new_grounding = []
for idx, (entry, score) in enumerate(grounding):
chosen, changed = _replace_grounding(ns, entry,
rank_key,
rel_fun)
new_grounding.append((chosen, score))
# If the top grounding was changed and we need
# to flip polarity then the Statement's polarity
# is flipped
if idx == 0 and changed and flip_polarity:
stmt.flip_polarity(agent_idx=agent_idx)
agent.db_refs[ns] = new_grounding
# If there's only one grounding then we just normalize
# that one
else:
chosen, changed = _replace_grounding(ns, grounding,
rank_key, rel_fun)
agent.db_refs[ns] = chosen
if changed and flip_polarity:
stmt.flip_polarity(agent_idx=agent_idx)
def normalize_equivalences(self, ns, rank_key=None):
"""Normalize to one of a set of equivalent concepts across statements.
This function changes Statements in place without returning a value.
Parameters
----------
ns : str
The db_refs namespace for which the equivalence relation should
be applied.
rank_key : Optional[function]
A function handle which assigns a sort key to each entry in the
given namespace to allow prioritizing in a controlled way which
concept is normalized to.
"""
rel_fun = functools.partial(self.ontology.child_rel,
rel_types={'is_equal'})
self._normalize_relations(ns, rank_key, rel_fun, False)
def normalize_opposites(self, ns, rank_key=None):
"""Normalize to one of a pair of opposite concepts across statements.
This function changes Statements in place without returning a value.
Parameters
----------
ns : str
The db_refs namespace for which the opposite relation should
be applied.
rank_key : Optional[function]
A function handle which assigns a sort key to each entry in the
given namespace to allow prioritizing in a controlled way which
concept is normalized to.
"""
rel_fun = functools.partial(self.ontology.child_rel,
rel_types={'is_opposite'})
self._normalize_relations(ns, rank_key, rel_fun, True)
def find_refinements_for_statement(stmt, filters):
"""Return refinements for a single statement given initialized filters.
Parameters
----------
stmt : indra.statements.Statement
The statement whose relations should be found.
filters : list[:py:class:`indra.preassembler.refinement.RefinementFilter`]
A list of refinement filter instances. The filters passed to this
function need to have been initialized with stmts_by_hash.
Returns
-------
set
A set of statement hashes that this statement refines.
"""
first_filter = True
relations = {}
for filt in filters:
# The first filter outputs all the possible relations that it
# can find, while subsequent filters are taking the results of
# the previous filter as the basis of further filtering down
# on possible refinements.
possibly_related = None if first_filter else relations
# We pass in the specific statement and any constraints on
# previously determined possible relations to the filter.
relations = filt.get_less_specifics(stmt,
possibly_related=possibly_related)
first_filter = False
return relations
def render_stmt_graph(statements, reduce=True, english=False, rankdir=None,
agent_style=None):
"""Render the statement hierarchy as a pygraphviz graph.
Parameters
----------
statements : list of :py:class:`indra.statements.Statement`
A list of top-level statements with associated supporting statements
resulting from building a statement hierarchy with
:py:meth:`combine_related`.
reduce : bool
Whether to perform a transitive reduction of the edges in the graph.
Default is True.
english : bool
If True, the statements in the graph are represented by their
English-assembled equivalent; otherwise they are represented as
text-formatted Statements.
rankdir : str or None
Argument to pass through to the pygraphviz `AGraph` constructor
specifying graph layout direction. In particular, a value of 'LR'
specifies a left-to-right direction. If None, the pygraphviz default
is used.
agent_style : dict or None
Dict of attributes specifying the visual properties of nodes. If None,
the following default attributes are used::
agent_style = {'color': 'lightgray', 'style': 'filled',
'fontname': 'arial'}
Returns
-------
pygraphviz.AGraph
Pygraphviz graph with nodes representing statements and edges pointing
from supported statements to supported_by statements.
Examples
--------
Pattern for getting statements and rendering as a Graphviz graph:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> pa.combine_related() # doctest:+ELLIPSIS
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> graph = render_stmt_graph(pa.related_stmts)
>>> graph.write('example_graph.dot') # To make the DOT file
>>> graph.draw('example_graph.png', prog='dot') # To make an image
Resulting graph:
.. image:: /images/example_graph.png
:align: center
:alt: Example statement graph rendered by Graphviz
"""
import pygraphviz as pgv
from indra.assemblers.english import EnglishAssembler
# Set the default agent formatting properties
if agent_style is None:
agent_style = {'color': 'lightgray', 'style': 'filled',
'fontname': 'arial'}
# Sets to store all of the nodes and edges as we recursively process all
# of the statements
nodes = set([])
edges = set([])
stmt_dict = {}
# Recursive function for processing all statements
def process_stmt(stmt):
nodes.add(str(stmt.matches_key()))
stmt_dict[str(stmt.matches_key())] = stmt
for sby_ix, sby_stmt in enumerate(stmt.supported_by):
edges.add((str(stmt.matches_key()), str(sby_stmt.matches_key())))
process_stmt(sby_stmt)
# Process all of the top-level statements, getting the supporting statements
# recursively
for stmt in statements:
process_stmt(stmt)
# Create a networkx graph from the nodes
nx_graph = nx.DiGraph()
nx_graph.add_edges_from(edges)
# Perform transitive reduction if desired
if reduce:
nx_graph = nx.algorithms.dag.transitive_reduction(nx_graph)
# Create a pygraphviz graph from the nx graph
try:
pgv_graph = pgv.AGraph(name='statements', directed=True,
rankdir=rankdir)
except NameError:
logger.error('Cannot generate graph because '
'pygraphviz could not be imported.')
return None
for node in nx_graph.nodes():
stmt = stmt_dict[node]
if english:
ea = EnglishAssembler([stmt])
stmt_str = ea.make_model()
else:
stmt_str = str(stmt)
pgv_graph.add_node(node,
label='%s (%d)' % (stmt_str, len(stmt.evidence)),
**agent_style)
pgv_graph.add_edges_from(nx_graph.edges())
return pgv_graph
def flatten_stmts(stmts):
"""Return the full set of unique stms in a pre-assembled stmt graph.
The flattened list of statements returned by this function can be
compared to the original set of unique statements to make sure no
statements have been lost during the preassembly process.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
A list of top-level statements with associated supporting statements
resulting from building a statement hierarchy with
:py:meth:`combine_related`.
Returns
-------
stmts : list of :py:class:`indra.statements.Statement`
List of all statements contained in the hierarchical statement graph.
Examples
--------
Calling :py:meth:`combine_related` on two statements results in one
top-level statement; calling :py:func:`flatten_stmts` recovers both:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> pa.combine_related() # doctest:+ELLIPSIS
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> flattened = flatten_stmts(pa.related_stmts)
>>> flattened.sort(key=lambda x: x.matches_key())
>>> flattened
[Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)]
"""
total_stmts = set(stmts)
for stmt in stmts:
if stmt.supported_by:
children = flatten_stmts(stmt.supported_by)
total_stmts = total_stmts.union(children)
return list(total_stmts)
def flatten_evidence(stmts, collect_from=None):
"""Add evidence from *supporting* stmts to evidence for *supported* stmts.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
A list of top-level statements with associated supporting statements
resulting from building a statement hierarchy with
:py:meth:`combine_related`.
collect_from : str in ('supports', 'supported_by')
String indicating whether to collect and flatten evidence from the
`supports` attribute of each statement or the `supported_by` attribute.
If not set, defaults to 'supported_by'.
Returns
-------
stmts : list of :py:class:`indra.statements.Statement`
Statement hierarchy identical to the one passed, but with the
evidence lists for each statement now containing all of the evidence
associated with the statements they are supported by.
Examples
--------
Flattening evidence adds the two pieces of evidence from the supporting
statement to the evidence list of the top-level statement:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1,
... evidence=[Evidence(text='foo'), Evidence(text='bar')])
>>> st2 = Phosphorylation(braf, map2k1, residue='S',
... evidence=[Evidence(text='baz'), Evidence(text='bak')])
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> pa.combine_related() # doctest:+ELLIPSIS
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> [e.text for e in pa.related_stmts[0].evidence]
['baz', 'bak']
>>> flattened = flatten_evidence(pa.related_stmts)
>>> sorted([e.text for e in flattened[0].evidence])
['bak', 'bar', 'baz', 'foo']
"""
if collect_from is None:
collect_from = 'supported_by'
if collect_from not in ('supports', 'supported_by'):
raise ValueError('collect_from must be one of "supports", '
'"supported_by"')
logger.info('Flattening evidence based on %s' % collect_from)
# Copy all of the statements--these will be the ones where we update
# the evidence lists
stmts = fast_deepcopy(stmts)
for stmt in stmts:
# We get the original evidence keys here so we can differentiate them
# from ones added during flattening.
orig_ev_keys = [ev.matches_key() for ev in stmt.evidence]
# We now do the flattening
total_evidence = _flatten_evidence_for_stmt(stmt, collect_from)
# Here we add annotations for each evidence in the list,
# depending on whether it's an original direct evidence or one that
# was added during flattening
new_evidence = []
for ev in total_evidence:
ev_key = ev.matches_key()
if ev_key in orig_ev_keys:
ev.annotations['support_type'] = 'direct'
new_evidence.append(ev)
else:
ev_copy = fast_deepcopy(ev)
ev_copy.annotations['support_type'] = collect_from
new_evidence.append(ev_copy)
# Now set the new evidence list as the copied statement's evidence
stmt.evidence = new_evidence
return stmts
def _flatten_evidence_for_stmt(stmt, collect_from):
supp_stmts = (stmt.supports if collect_from == 'supports'
else stmt.supported_by)
total_evidence = set(stmt.evidence)
for supp_stmt in supp_stmts:
child_evidence = _flatten_evidence_for_stmt(supp_stmt, collect_from)
total_evidence = total_evidence.union(child_evidence)
return list(total_evidence)
def default_matches_fun(st):
return st.matches_key()
|
|
#!/usr/bin/env python
##########################################################################################
# Developer: Luan,Jingchao Project: HuMaIN (http://humain.acis.ufl.edu)
# Description:
# Extract the individual line images from a binarized image, based on the default
# parameters or parameters set by user.
##########################################################################################
# Copyright 2017 Advanced Computing and Information Systems (ACIS) Lab - UF
# (https://www.acis.ufl.edu/)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################################
# TODO:
# ! add option for padding
# - fix occasionally missing page numbers
# - treat large h-whitespace as separator
# - handle overlapping candidates
# - use cc distance statistics instead of character scale
# - page frame detection
# - read and use text image segmentation mask
# - pick up stragglers
# ? laplacian as well
from __future__ import print_function
from pylab import *
import sys
import glob,os,os.path
import traceback
from scipy.ndimage import measurements
from scipy.misc import imsave
from scipy.ndimage.filters import gaussian_filter,uniform_filter,maximum_filter
from multiprocessing import Pool
import ocrolib
from ocrolib import psegutils,morph,sl
from ocrolib.exceptions import OcropusException
from ocrolib.toplevel import *
from numpy import amax, amin
from django.conf import settings
import logging
# The default parameters values
args_default = {
### The following 15 parameters can be set by user
# limits
'minscale':1.0, # minimum scale permitted
'maxlines':300, # maximum # lines permitted
# scale parameters
'scale':0.0, # the basic scale of the document (roughly, xheight) 0=automatic
'hscale':1.0, # non-standard scaling of horizontal parameters
'vscale':1.0, # non-standard scaling of vertical parameters
# line parameters
'threshold':0.2, # baseline threshold
'noise':8, # noise threshold for removing small components from lines
'usegauss':False, # use gaussian instead of uniform
# column separator parameters
'maxseps':0, # maximum # black column separators
'sepwiden':10, # widen black separators (to account for warping)
'maxcolseps':3, # maximum # whitespace column separators
'csminheight':10.0,# minimum column height (units=scale)
# output parameters
'pad':3, # padding added around extracted lines
'expand':3, # expand mask for grayscale extraction
### The following parameters cannot be overwritten by users
# other parameters
'nocheck':True, # disable error checking on inputs
'quiet':False, # be less verbose, usually used together with parallel
'debug':False
#'parallel':0 # number of parallel processes to use
}
# The global variable
args = {}
logger = logging.getLogger('segmentation')
# The entry point of the segmentation service.
# Returns a dict that maps each segmented single-line image name to its image object for the given input image
def segmentation_exec(image, parameters):
# Update parameters values customed by user
# Each time update the args with the default args dictionary, avoid the effect of the previous update
global args
args = args_default.copy()
args.update(parameters)
# Segment the image
output = None # key: single-line image name. value: single-line image object
try:
output = process(image)
except OcropusException as e:
if e.trace:
traceback.print_exc()
else:
logger.error(image+":"+e)
except Exception as e:
traceback.print_exc()
return output
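# Illustrative call: segmentation_exec("specimen.png", {"maxlines": 500,
# "usegauss": True}) copies args_default, overrides those two keys for this
# call only, and returns a dict mapping line-image names such as
# "specimen_1.png", "specimen_2.png", ... to PIL image objects.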
def norm_max(v):
return v/amax(v)
def check_page(image):
if len(image.shape)==3: return "input image is color image %s"%(image.shape,)
if mean(image)<median(image): return "image may be inverted"
h,w = image.shape
if h<600: return "image not tall enough for a page image %s"%(image.shape,)
if h>10000: return "image too tall for a page image %s"%(image.shape,)
if w<600: return "image too narrow for a page image %s"%(image.shape,)
if w>10000: return "line too wide for a page image %s"%(image.shape,)
slots = int(w*h*1.0/(30*30))
_,ncomps = measurements.label(image>mean(image))
if ncomps<10: return "too few connected components for a page image (got %d)"%(ncomps,)
if ncomps>slots: return "too many connected components for a page image (%d > %d)"%(ncomps,slots)
return None
def print_info(*objs):
print("INFO: ", *objs, file=sys.stdout)
def print_error(*objs):
print("ERROR: ", *objs, file=sys.stderr)
def B(a):
if a.dtype==dtype('B'): return a
return array(a,'B')
def DSAVE(title,image):
if not args['debug']: return
if type(image)==list:
assert len(image)==3
image = transpose(array(image),[1,2,0])
fname = "_"+title+".png"
#logger.info("debug " + fname)
imsave(fname,image)
################################################################
### Column finding.
###
### This attempts to find column separators, either as extended
### vertical black lines or extended vertical whitespace.
### It will work fairly well in simple cases, but for unusual
### documents, you need to tune the parameters.
################################################################
def compute_separators_morph(binary,scale):
"""Finds vertical black lines corresponding to column separators."""
d0 = int(max(5,scale/4))
d1 = int(max(5,scale))+args['sepwiden']
thick = morph.r_dilation(binary,(d0,d1))
vert = morph.rb_opening(thick,(10*scale,1))
vert = morph.r_erosion(vert,(d0//2,args['sepwiden']))
vert = morph.select_regions(vert,sl.dim1,min=3,nbest=2*args['maxseps'])
vert = morph.select_regions(vert,sl.dim0,min=20*scale,nbest=args['maxseps'])
return vert
def compute_colseps_mconv(binary,scale=1.0):
"""Find column separators using a combination of morphological
operations and convolution."""
h,w = binary.shape
smoothed = gaussian_filter(1.0*binary,(scale,scale*0.5))
smoothed = uniform_filter(smoothed,(5.0*scale,1))
thresh = (smoothed<amax(smoothed)*0.1)
DSAVE("1thresh",thresh)
blocks = morph.rb_closing(binary,(int(4*scale),int(4*scale)))
DSAVE("2blocks",blocks)
seps = minimum(blocks,thresh)
seps = morph.select_regions(seps,sl.dim0,min=args['csminheight']*scale,nbest=args['maxcolseps'])
DSAVE("3seps",seps)
blocks = morph.r_dilation(blocks,(5,5))
DSAVE("4blocks",blocks)
seps = maximum(seps,1-blocks)
DSAVE("5combo",seps)
return seps
def compute_colseps_conv(binary,scale=1.0):
"""Find column separators by convoluation and
thresholding."""
h,w = binary.shape
# find vertical whitespace by thresholding
smoothed = gaussian_filter(1.0*binary,(scale,scale*0.5))
smoothed = uniform_filter(smoothed,(5.0*scale,1))
thresh = (smoothed<amax(smoothed)*0.1)
DSAVE("1thresh",thresh)
# find column edges by filtering
grad = gaussian_filter(1.0*binary,(scale,scale*0.5),order=(0,1))
grad = uniform_filter(grad,(10.0*scale,1))
# grad = abs(grad) # use this for finding both edges
grad = (grad>0.5*amax(grad))
DSAVE("2grad",grad)
# combine edges and whitespace
seps = minimum(thresh,maximum_filter(grad,(int(scale),int(5*scale))))
seps = maximum_filter(seps,(int(2*scale),1))
DSAVE("3seps",seps)
# select only the biggest column separators
seps = morph.select_regions(seps,sl.dim0,min=args['csminheight']*scale,nbest=args['maxcolseps'])
DSAVE("4seps",seps)
return seps
def compute_colseps(binary,scale):
"""Computes column separators either from vertical black lines or whitespace."""
#logger.info("considering at most %g whitespace column separators" % args['maxcolseps'])
colseps = compute_colseps_conv(binary,scale)
DSAVE("colwsseps",0.7*colseps+0.3*binary)
if args['maxseps']>0:
#logger.info("considering at most %g black column separators" % args['maxseps'])
seps = compute_separators_morph(binary,scale)
DSAVE("colseps",0.7*seps+0.3*binary)
colseps = maximum(colseps,seps)
binary = minimum(binary,1-seps)
return colseps,binary
################################################################
### Text Line Finding.
###
### This identifies the tops and bottoms of text lines by
### computing gradients and performing some adaptive thresholding.
### Those components are then used as seeds for the text lines.
################################################################
def compute_gradmaps(binary,scale):
# use gradient filtering to find baselines
boxmap = psegutils.compute_boxmap(binary,scale)
cleaned = boxmap*binary
DSAVE("cleaned",cleaned)
if args['usegauss']:
# this uses Gaussians
grad = gaussian_filter(1.0*cleaned,(args['vscale']*0.3*scale,
args['hscale']*6*scale),order=(1,0))
else:
# this uses non-Gaussian oriented filters
grad = gaussian_filter(1.0*cleaned,(max(4,args['vscale']*0.3*scale),
args['hscale']*scale),order=(1,0))
grad = uniform_filter(grad,(args['vscale'],args['hscale']*6*scale))
bottom = ocrolib.norm_max((grad<0)*(-grad))
top = ocrolib.norm_max((grad>0)*grad)
return bottom,top,boxmap
def compute_line_seeds(binary,bottom,top,colseps,scale):
"""Base on gradient maps, computes candidates for baselines
and xheights. Then, it marks the regions between the two
as a line seed."""
t = args['threshold']
vrange = int(args['vscale']*scale)
bmarked = maximum_filter(bottom==maximum_filter(bottom,(vrange,0)),(2,2))
bmarked = bmarked*(bottom>t*amax(bottom)*t)*(1-colseps)
tmarked = maximum_filter(top==maximum_filter(top,(vrange,0)),(2,2))
tmarked = tmarked*(top>t*amax(top)*t/2)*(1-colseps)
tmarked = maximum_filter(tmarked,(1,20))
seeds = zeros(binary.shape,'i')
delta = max(3,int(scale/2))
for x in range(bmarked.shape[1]):
transitions = sorted([(y,1) for y in find(bmarked[:,x])]+[(y,0) for y in find(tmarked[:,x])])[::-1]
transitions += [(0,0)]
for l in range(len(transitions)-1):
y0,s0 = transitions[l]
if s0==0: continue
seeds[y0-delta:y0,x] = 1
y1,s1 = transitions[l+1]
if s1==0 and (y0-y1)<5*scale: seeds[y1:y0,x] = 1
seeds = maximum_filter(seeds,(1,int(1+scale)))
seeds = seeds*(1-colseps)
DSAVE("lineseeds",[seeds,0.3*tmarked+0.7*bmarked,binary])
seeds,_ = morph.label(seeds)
return seeds
################################################################
### The complete line segmentation process.
################################################################
def remove_hlines(binary,scale,maxsize=10):
labels,_ = morph.label(binary)
objects = morph.find_objects(labels)
for i,b in enumerate(objects):
if sl.width(b)>maxsize*scale:
labels[b][labels[b]==i+1] = 0
return array(labels!=0,'B')
def compute_segmentation(binary,scale):
"""Given a binary image, compute a complete segmentation into
lines, computing both columns and text lines."""
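# Summary of the pipeline below: remove_hlines strips horizontal rules,
# compute_colseps finds column separators, compute_gradmaps and
# compute_line_seeds locate baseline/x-height bands, and the seed labels are
# then propagated and spread so every text line gets its own integer label.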
binary = array(binary,'B')
# start by removing horizontal black lines, which only
# interfere with the rest of the page segmentation
binary = remove_hlines(binary,scale)
# do the column finding
#if not args['quiet']: logger.info("computing column separators")
colseps,binary = compute_colseps(binary,scale)
# now compute the text line seeds
#if not args['quiet']: logger.info("computing lines")
bottom,top,boxmap = compute_gradmaps(binary,scale)
seeds = compute_line_seeds(binary,bottom,top,colseps,scale)
DSAVE("seeds",[bottom,top,boxmap])
# spread the text line seeds to all the remaining
# components
#if not args['quiet']: logger.info("propagating labels")
llabels = morph.propagate_labels(boxmap,seeds,conflict=0)
#if not args['quiet']: logger.info("spreading labels")
spread = morph.spread_labels(seeds,maxdist=scale)
llabels = where(llabels>0,llabels,spread*binary)
segmentation = llabels*binary
return segmentation
################################################################
### Processing each file.
################################################################
def process(image):
imagename_base, ext = os.path.splitext(str(image))
try:
binary = ocrolib.read_image_binary(image)
except IOError:
if ocrolib.trace: traceback.print_exc()
logger.error("cannot open %s" % (image))
return
checktype(binary,ABINARY2)
if not args['nocheck']:
check = check_page(amax(binary)-binary)
if check is not None:
logger.error("%s SKIPPED %s (use -n to disable this check)" % (image, check))
return
binary = 1-binary # invert
if args['scale']==0:
scale = psegutils.estimate_scale(binary)
else:
scale = args['scale']
#logger.info("scale %f" % (scale))
if isnan(scale) or scale>1000.0:
logger.error("%s: bad scale (%g); skipping\n" % (image, scale))
return
if scale<args['minscale']:
logger.error("%s: scale (%g) less than --minscale; skipping\n" % (image, scale))
return
# find columns and text lines
#if not args['quiet']:
#logger.info("computing segmentation")
segmentation = compute_segmentation(binary,scale)
if amax(segmentation)>args['maxlines']:
logger.error("%s: too many lines %g" % (image, amax(segmentation)))
return
#if not args['quiet']:
#logger.info("number of lines %g" % amax(segmentation))
# compute the reading order
#if not args['quiet']:
#logger.info("finding reading order")
lines = psegutils.compute_lines(segmentation,scale)
order = psegutils.reading_order([l.bounds for l in lines])
lsort = psegutils.topsort(order)
# renumber the labels so that they conform to the specs
nlabels = amax(segmentation)+1
renumber = zeros(nlabels,'i')
for i,v in enumerate(lsort): renumber[lines[v].label] = 0x010000+(i+1)
segmentation = renumber[segmentation]
# finally, output everything
#if not args['quiet']:
#logger.info("writing lines")
lines = [lines[i] for i in lsort]
cleaned = ocrolib.remove_noise(binary,args['noise'])
### Return image objects dictionary (in memory)
output_dic = {} # key: single-line image name. value: single-line image object
for index, line in enumerate(lines):
binline = psegutils.extract_masked(1-cleaned,line,pad=args['pad'],expand=args['expand'])
assert binline.ndim==2
midrange = 0.5*(amin(binline)+amax(binline))
image_array = array(255*(binline>midrange),'B')
image_pil = ocrolib.array2pil(image_array)
key = imagename_base + "_%d.png" % (index+1)
output_dic[key] = image_pil
#logger.info("%6d %s %4.1f %d" % (i, image, scale, len(lines)))
return output_dic
|
|
from collections import defaultdict
from typing import Any, Dict, List, Union
from ..users.projector import get_user_name
from ..utils.projector import (
AllData,
ProjectorElementException,
get_config,
register_projector_slide,
)
# Important: All functions have to be pure. This means that they can only
# access the data that they get as arguments and must not have any
# side effects. They are called from an async context, so they have
# to be fast!
def get_sorted_agenda_items(all_data: AllData) -> List[Dict[str, Any]]:
"""
Returns all agenda items sorted by weight; they are pre-sorted by id so that
items with the same weight keep a stable, id-ordered sequence.
"""
return sorted(
sorted(all_data["agenda/item"].values(), key=lambda item: item["id"]),
key=lambda item: item["weight"],
)
def get_flat_tree(all_data: AllData, parent_id: int = 0) -> List[Dict[str, Any]]:
"""
Build the item tree from all_data.
Only build the tree from elements underneath parent_id.
Returns a flat list of dicts, each containing the item's item_number,
title_information, collection and nesting depth.
"""
# Build a dict from an item_id to all its children
children: Dict[int, List[int]] = defaultdict(list)
if "agenda/item" in all_data:
for item in get_sorted_agenda_items(all_data):
if item["type"] == 1: # only normal items
children[item["parent_id"] or 0].append(item["id"])
tree = []
def get_children(item_ids: List[int], depth: int) -> None:
for item_id in item_ids:
tree.append(
{
"item_number": all_data["agenda/item"][item_id]["item_number"],
"title_information": all_data["agenda/item"][item_id][
"title_information"
],
"collection": all_data["agenda/item"][item_id]["content_object"][
"collection"
],
"depth": depth,
}
)
get_children(children[item_id], depth + 1)
get_children(children[parent_id], 0)
return tree
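# Illustrative shape of the returned list (values are made up):
#
#     [
#         {"item_number": "1", "title_information": {...}, "collection": "topics/topic", "depth": 0},
#         {"item_number": "1.1", "title_information": {...}, "collection": "topics/topic", "depth": 1},
#         {"item_number": "2", "title_information": {...}, "collection": "motions/motion", "depth": 0},
#     ]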
def item_list_slide(
all_data: AllData, element: Dict[str, Any], projector_id: int
) -> Dict[str, Any]:
"""
Item list slide.
Returns all root items or all children of an item.
"""
only_main_items = element.get("only_main_items", True)
if only_main_items:
agenda_items = []
for item in get_sorted_agenda_items(all_data):
if item["parent_id"] is None and item["type"] == 1:
agenda_items.append(
{
"item_number": item["item_number"],
"title_information": item["title_information"],
"collection": item["content_object"]["collection"],
}
)
else:
agenda_items = get_flat_tree(all_data)
return {"items": agenda_items}
def list_of_speakers_slide(
all_data: AllData, element: Dict[str, Any], projector_id: int
) -> Dict[str, Any]:
"""
List of speakers slide.
Returns all usernames that are on the list of speakers of a slide.
"""
item_id = element.get("id")
if item_id is None:
raise ProjectorElementException("id is required for list of speakers slide")
return get_list_of_speakers_slide_data(all_data, item_id)
def get_list_of_speakers_slide_data(all_data: AllData, item_id: int) -> Dict[str, Any]:
try:
item = all_data["agenda/item"][item_id]
except KeyError:
raise ProjectorElementException(f"Item {item_id} does not exist")
# Partition speaker objects to waiting, current and finished
speakers_waiting = []
speakers_finished = []
current_speaker = None
for speaker in item["speakers"]:
user = get_user_name(all_data, speaker["user_id"])
formatted_speaker = {
"user": user,
"marked": speaker["marked"],
"weight": speaker["weight"],
"end_time": speaker["end_time"],
}
if speaker["begin_time"] is None and speaker["end_time"] is None:
speakers_waiting.append(formatted_speaker)
elif speaker["begin_time"] is not None and speaker["end_time"] is None:
current_speaker = formatted_speaker
else:
speakers_finished.append(formatted_speaker)
# sort speakers
speakers_waiting = sorted(speakers_waiting, key=lambda s: s["weight"])
speakers_finished = sorted(speakers_finished, key=lambda s: s["end_time"])
number_of_last_speakers = get_config(all_data, "agenda_show_last_speakers")
if number_of_last_speakers == 0:
speakers_finished = []
else:
speakers_finished = speakers_finished[
-number_of_last_speakers:
] # Take the last speakers
return {
"waiting": speakers_waiting,
"current": current_speaker,
"finished": speakers_finished,
"content_object_collection": item["content_object"]["collection"],
"title_information": item["title_information"],
"item_number": item["item_number"],
}
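# Illustrative return value (all field values are made up):
#     {
#         "waiting": [{"user": "Alice", "marked": False, "weight": 1, "end_time": None}],
#         "current": {"user": "Bob", "marked": False, "weight": 2, "end_time": None},
#         "finished": [...],  # at most agenda_show_last_speakers entries
#         "content_object_collection": "topics/topic",
#         "title_information": {...},
#         "item_number": "3",
#     }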
def get_current_item_id_for_projector(
all_data: AllData, projector: Dict[str, Any]
) -> Union[int, None]:
"""
Search for elements that have an agenda item:
Try to get a model by the collection and id given in the element. This
model needs to have an 'agenda_item_id' and that item must exist. The first
matching element is taken.
"""
elements = projector["elements"]
item_id = None
for element in elements:
if "id" not in element:
continue
collection = element["name"]
id = element["id"]
if collection not in all_data or id not in all_data[collection]:
continue
model = all_data[collection][id]
if "agenda_item_id" not in model:
continue
if not model["agenda_item_id"] in all_data["agenda/item"]:
continue
item_id = model["agenda_item_id"]
break
return item_id
def get_reference_projector(all_data: AllData, projector_id: int) -> Dict[str, Any]:
"""
Returns the reference projector of the given projector (by id).
"""
try:
this_projector = all_data["core/projector"][projector_id]
except KeyError:
raise ProjectorElementException(f"Projector {projector_id} does not exist")
reference_projector_id = this_projector["reference_projector_id"] or projector_id
try:
reference_projector = all_data["core/projector"][reference_projector_id]
except KeyError:
raise ProjectorElementException(
f"Projector {reference_projector_id} does not exist"
)
return reference_projector
def current_list_of_speakers_slide(
all_data: AllData, element: Dict[str, Any], projector_id: int
) -> Dict[str, Any]:
"""
The current list of speakers slide. Creates the data for the given projector.
"""
reference_projector = get_reference_projector(all_data, projector_id)
item_id = get_current_item_id_for_projector(all_data, reference_projector)
if item_id is None: # no element found
return {}
return get_list_of_speakers_slide_data(all_data, item_id)
def current_speaker_chyron_slide(
all_data: AllData, element: Dict[str, Any], projector_id: int
) -> Dict[str, Any]:
"""
Returns the username for the current speaker.
"""
reference_projector = get_reference_projector(all_data, projector_id)
item_id = get_current_item_id_for_projector(all_data, reference_projector)
if item_id is None: # no element found
return {}
# get item
try:
item = all_data["agenda/item"][item_id]
except KeyError:
raise ProjectorElementException(f"Item {item_id} does not exist")
# find current speaker
current_speaker = None
for speaker in item["speakers"]:
if speaker["begin_time"] is not None and speaker["end_time"] is None:
current_speaker = get_user_name(all_data, speaker["user_id"])
return {"current_speaker": current_speaker}
def register_projector_slides() -> None:
register_projector_slide("agenda/item-list", item_list_slide)
register_projector_slide("agenda/list-of-speakers", list_of_speakers_slide)
register_projector_slide(
"agenda/current-list-of-speakers", current_list_of_speakers_slide
)
register_projector_slide(
"agenda/current-list-of-speakers-overlay", current_list_of_speakers_slide
)
register_projector_slide(
"agenda/current-speaker-chyron", current_speaker_chyron_slide
)
|
|
import pytest
from marshmallow import Schema, fields
from sqlalchemy import Column, Integer, String, sql
from flask_resty import Api, FixedSorting, GenericModelView, Sorting
from flask_resty.testing import assert_response
# -----------------------------------------------------------------------------
@pytest.fixture
def models(db):
class Widget(db.Model):
__tablename__ = "widgets"
id = Column(Integer, primary_key=True)
name = Column(String)
content = Column(String)
size = Column(Integer)
db.create_all()
yield {"widget": Widget}
db.drop_all()
@pytest.fixture
def schemas():
class WidgetSchema(Schema):
id = fields.Integer(as_string=True)
name = fields.String()
content = fields.String()
size = fields.Integer()
return {"widget": WidgetSchema()}
@pytest.fixture(autouse=True)
def routes(app, models, schemas):
Widget = models["widget"]
class WidgetListView(GenericModelView):
model = models["widget"]
schema = schemas["widget"]
sorting = Sorting(
"name",
"size",
content_length=sql.func.length(Widget.content),
content_length2=lambda model, field_name: sql.func.length(
model.content
),
)
def get(self):
return self.list()
class FixedWidgetListView(WidgetListView):
sorting = FixedSorting("name,size")
def get(self):
return self.list()
api = Api(app)
api.add_resource("/widgets", WidgetListView)
api.add_resource("/fixed_widgets", FixedWidgetListView)
@pytest.fixture(autouse=True)
def data(db, models):
db.session.add_all(
(
models["widget"](name="Foo", size=1, content="Some bold text"),
models["widget"](name="Foo", size=5, content="Short"),
models["widget"](
name="Baz", size=3, content="LorumLorumLorumLorum"
),
)
)
db.session.commit()
# -----------------------------------------------------------------------------
def test_single(client):
response = client.get("/widgets?sort=size")
assert_response(
response,
200,
[
{"id": "1", "name": "Foo", "size": 1},
{"id": "3", "name": "Baz", "size": 3},
{"id": "2", "name": "Foo", "size": 5},
],
)
def test_many(client):
response = client.get("/widgets?sort=name,-size")
assert_response(
response,
200,
[
{"id": "3", "name": "Baz", "size": 3},
{"id": "2", "name": "Foo", "size": 5},
{"id": "1", "name": "Foo", "size": 1},
],
)
def test_no_sort(client):
response = client.get("/widgets")
assert_response(
response,
200,
[
{"id": "1", "name": "Foo", "size": 1},
{"id": "2", "name": "Foo", "size": 5},
{"id": "3", "name": "Baz", "size": 3},
],
)
def test_fixed(client):
response = client.get("/fixed_widgets")
assert_response(
response,
200,
[
{"id": "3", "name": "Baz", "size": 3},
{"id": "1", "name": "Foo", "size": 1},
{"id": "2", "name": "Foo", "size": 5},
],
)
def test_custom_expression(client):
response = client.get("/widgets?sort=content_length")
assert_response(
response,
200,
[
{"id": "2", "name": "Foo", "content": "Short"},
{"id": "1", "name": "Foo", "content": "Some bold text"},
{"id": "3", "name": "Baz", "content": "LorumLorumLorumLorum"},
],
)
def test_custom_callable(client):
response = client.get("/widgets?sort=content_length2")
assert_response(
response,
200,
[
{"id": "2", "name": "Foo", "content": "Short"},
{"id": "1", "name": "Foo", "content": "Some bold text"},
{"id": "3", "name": "Baz", "content": "LorumLorumLorumLorum"},
],
)
def test_multiple_named_and_expression_sorts(client):
response = client.get("/widgets?sort=name,content_length")
assert_response(
response,
200,
[
{"id": "3", "name": "Baz", "content": "LorumLorumLorumLorum"},
{"id": "2", "name": "Foo", "content": "Short"},
{"id": "1", "name": "Foo", "content": "Some bold text"},
],
)
# -----------------------------------------------------------------------------
def test_error_invalid_field(client):
response = client.get("/widgets?sort=id")
assert_response(
response,
400,
[{"code": "invalid_sort", "source": {"parameter": "sort"}}],
)
def test_error_empty(client):
response = client.get("/widgets?sort=")
assert_response(
response,
400,
[{"code": "invalid_sort", "source": {"parameter": "sort"}}],
)
def test_duplicate_fields(client):
with pytest.raises(
ValueError,
match="Sort field\\(s\\) cannot be passed as both positional and keyword arguments",
):
Sorting("name", "date", date=True)
|
|
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apple SUS shared functions."""
import datetime
import logging
import re
import xml
from xml.dom import minidom
from google.appengine.api import taskqueue
from google.appengine.ext import deferred
from simian.mac.common import datastore_locks
from simian import settings
from simian.mac import common
from simian.mac import models
from simian.mac.models import constants
from simian.mac.munki import plist
OS_VERSIONS = frozenset(['10.9', '10.10', '10.11', '10.12', '10.13', '10.14'])
_CATALOG_REGENERATION_LOCK_NAME = 'applesus_catalog_regeneration_%s_%s'
MON, TUE, WED, THU, FRI, SAT, SUN = range(0, 7)
def CatalogRegenerationLockName(track, os_version):
return _CATALOG_REGENERATION_LOCK_NAME % (
track, os_version.replace('.', '-'))
class Error(Exception):
"""Base error."""
class DocumentFormatError(Error):
"""Error in document format."""
class DistFileDocument(object):
"""Class to hold a Apple SUS distfile document."""
def __init__(self):
"""Initializer."""
self.Reset()
def Reset(self):
"""Reset variables."""
self.description = None
self.restart_required = None
self.server_comment = None
self.softwareupdate_name = None
self.title = None
self.version = None
self._installer_script = {}
def _ParseInstallerScriptString(self, istr):
"""Parse an installer script string and return its key/value pairs.
The installer script string appears generally as
"KEY" = "VALUE"
and can contain multiple lines. Apparently the quoting chars can be
double or single quotes, and the alternative quote char is allowed as
a literal inside the other.
Standard javascript-style comments are permitted.
Poorly formed lines will disrupt the parser and incomplete/no values
will be returned.
For example:
// This comment is OK
"KEY" = "VALUE";
"KEY2" = "VALUE2";
// Here's another comment later on.
"KEY3" = 'VALUE3
VALUE3MORE "THIS IS VALID"
';
Or, consider:
"KEY" = ; # this will break the parser
"NOTFOUNDKEY" = "NEVER GET HERE";
Args:
istr: str, see above format example above.
Returns:
dict
"""
installer_script = {}
kv_split = re.compile(
(r'(?:^//[^\n]*$)|'
'(?:^"(\w+)"\s*=\s*([\"\'])([^\\2]*?)\\2;$)'),
re.MULTILINE | re.DOTALL)
for i in re.finditer(kv_split, istr):
if i.group(1):
installer_script[i.group(1)] = i.group(3)
return installer_script
def LoadDocument(self, distfile_xml):
"""Load an entire distfile XML document and parse it.
Args:
distfile_xml: str, xml document
Raises:
DocumentFormatError: the XML document is malformed.
"""
try:
p = minidom.parseString(distfile_xml)
except xml.parsers.expat.ExpatError, e:
raise DocumentFormatError(str(e))
try:
l = p.getElementsByTagName('localization')[0]
s = p.getElementsByTagName('strings')[0]
cdata = []
for cn in s.childNodes:
cdata.append(cn.nodeValue)
cdata = ''.join(cdata)
except IndexError:
raise DocumentFormatError
# TODO(user): instead of regex, parse XML.
self.restart_required = re.search(
r'onConclusion=("|\')RequireRestart("|\')', distfile_xml) is not None
swupd_name_match = re.search(
r'suDisabledGroupID=("|\')([\w\s\.-]*)("|\')', distfile_xml)
if swupd_name_match:
self.softwareupdate_name = swupd_name_match.group(2)
self._installer_script = self._ParseInstallerScriptString(cdata)
self.description = self._installer_script.get('SU_DESCRIPTION')
self.server_comment = self._installer_script.get('SU_SERVERCOMMENT')
self.title = self._installer_script.get('SU_TITLE')
self.version = (self._installer_script.get('SU_VERS') or
self._installer_script.get('SU_VERSION'))
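# Illustrative sketch (not part of the original module): parses a hypothetical,
# hand-written distfile to show how LoadDocument pulls the SU_* keys out of the
# localization strings block. The update name and version below are made up.
def _DistFileDocumentExample():
  """Returns a DistFileDocument parsed from a tiny example distfile."""
  doc = DistFileDocument()
  doc.LoadDocument(
      '<installer-script><localization><strings>'
      '"SU_TITLE" = "Example Update";\n'
      '"SU_VERS" = "1.0";\n'
      '</strings></localization></installer-script>')
  # doc.title == 'Example Update', doc.version == '1.0', and
  # doc.restart_required is False since there is no onConclusion attribute.
  return doc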
def GenerateAppleSUSCatalogs(track=None, tracks=None, delay=0):
"""Generates Apple SUS catalogs for a given track, set of tracks, or all.
Note: this generates tracks for all os_versions on the given track/tracks.
Args:
track: string track to generate catalog for. OR,
tracks: list of string tracks.
delay: int. if > 0, defer generating the catalogs by this many seconds.
"""
if track and tracks:
raise ValueError('only one of track and tracks is allowed')
elif not tracks and not track:
tracks = common.TRACKS
elif track:
tracks = [track]
for track in tracks:
for os_version in OS_VERSIONS:
lock_name = CatalogRegenerationLockName(track, os_version)
lock = datastore_locks.DatastoreLock(lock_name)
try:
lock.Acquire(timeout=600 + delay, max_acquire_attempts=1)
except datastore_locks.AcquireLockError:
continue
if delay:
now_str = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
deferred_name = 'gen-applesus-catalog-%s-%s-%s' % (
os_version, track, now_str)
deferred_name = re.sub(r'[^\w-]', '', deferred_name)
try:
deferred.defer(
GenerateAppleSUSCatalog, os_version, track, catalog_lock=lock,
_countdown=delay, _name=deferred_name)
except taskqueue.TaskAlreadyExistsError:
logging.info('Skipping duplicate Apple SUS Catalog generation task.')
else:
GenerateAppleSUSCatalog(os_version, track, catalog_lock=lock)
if delay:
now_str = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
deferred_name = 'gen-sus-metadata-catalog-%s' % now_str
deferred_name = re.sub(r'[^\w-]', '', deferred_name)
try:
deferred.defer(
GenerateAppleSUSMetadataCatalog, _name=deferred_name)
except taskqueue.TaskAlreadyExistsError:
logging.info('Skipping duplicate Apple SUS metadata catalog generation task.')
else:
GenerateAppleSUSMetadataCatalog()
def GenerateAppleSUSCatalog(
os_version, track, datetime_=datetime.datetime, catalog_lock=None):
"""Generates an Apple SUS catalog for a given os_version and track.
This function loads the untouched/raw Apple SUS catalog, removes any
products/updates that are not approved for the given track, then saves
a new catalog (plist/xml) to Datastore for client consumption.
Args:
os_version: str OS version to generate the catalog for.
track: str track name to generate the catalog for.
datetime_: datetime module; only used for stub during testing.
catalog_lock: datastore_lock.DatastoreLock; If provided, the lock to release
upon completion of the operation.
Returns:
tuple, new models.AppleSUSCatalog object and plist.ApplePlist object. Or,
if there is no "untouched" catalog for the os_version, then (None, None) is
returned.
"""
logging.info('Generating catalog: %s_%s', os_version, track)
catalog_key = '%s_untouched' % os_version
untouched_catalog_obj = models.AppleSUSCatalog.get_by_key_name(catalog_key)
if not untouched_catalog_obj:
logging.warning('Apple Update catalog does not exist: %s', catalog_key)
if catalog_lock:
catalog_lock.Release()
return None, None
untouched_catalog_plist = plist.ApplePlist(untouched_catalog_obj.plist)
untouched_catalog_plist.Parse()
approved_product_ids = set()
products_query = models.AppleSUSProduct.AllActive().filter('tracks =', track)
for product in products_query:
approved_product_ids.add(product.product_id)
product_ids = untouched_catalog_plist.get('Products', {}).keys()
new_plist = untouched_catalog_plist
for product_id in product_ids:
if product_id not in approved_product_ids:
del new_plist['Products'][product_id]
catalog_plist_xml = new_plist.GetXml()
# Save the catalog using a time-specific key for rollback purposes.
now = datetime_.utcnow()
now_str = now.strftime('%Y-%m-%d-%H-%M-%S')
backup = models.AppleSUSCatalog(
key_name='backup_%s_%s_%s' % (os_version, track, now_str))
backup.plist = catalog_plist_xml
backup.put()
# Overwrite the catalog being served for this os_version/track pair.
c = models.AppleSUSCatalog(key_name='%s_%s' % (os_version, track))
c.plist = catalog_plist_xml
c.put()
if catalog_lock:
catalog_lock.Release()
return c, new_plist
def GenerateAppleSUSMetadataCatalog():
"""Generates the Apple SUS metadata catalog.
Returns:
The Catalog instance created.
"""
logging.info('Generating catalog: apple_update_metadata')
products = {}
# Currently, items need to exist in this catalog if they're unattended or
# have a force_install_after_date date set.
unattended = models.AppleSUSProduct.AllActive().filter('unattended =', True)
force_install_after_date = models.AppleSUSProduct.AllActive().filter(
'force_install_after_date !=', None)
for p in unattended:
products[p.product_id] = p
for p in force_install_after_date:
products[p.product_id] = p
catalog_plist_xml_fragments = [
p.plist.GetXmlContent() for p in products.values()]
catalog_plist_xml = constants.CATALOG_PLIST_XML % (
'\n'.join(catalog_plist_xml_fragments))
# Overwrite the apple_update_metadata catalog being served.
c = models.Catalog(key_name='apple_update_metadata')
c.plist = catalog_plist_xml
c.put()
models.Catalog.DeleteMemcacheWrap(
'apple_update_metadata', prop_name='plist_xml')
return c
def GetAutoPromoteDate(track, applesus_product):
"""Returns a date of when a given update will auto-promote.
Args:
track: str track to get the auto-promote datetime for.
applesus_product: models.AppleSUSProduct object.
Returns:
datetime.date of when the Apple SUS update will be auto-promoted to track,
or None if the product will never be auto-promoted due to manual_override or
the product not being in the unstable track.
Raises:
ValueError: an invalid track was specified; only testing/stable supported.
"""
if not settings.APPLE_AUTO_PROMOTE_ENABLED:
return None
if applesus_product.manual_override:
return None
elif common.UNSTABLE not in applesus_product.tracks:
return None
if track == common.TESTING:
days = settings.APPLE_UNSTABLE_GRACE_PERIOD_DAYS
elif track == common.STABLE:
days = settings.APPLE_TESTING_GRACE_PERIOD_DAYS
else:
raise ValueError('Invalid track was specified: %s' % track)
auto_promote_offset = datetime.timedelta(days=days)
previous_track_date = applesus_product.mtime.date()
if track == common.TESTING:
auto_promote_date = previous_track_date + auto_promote_offset
if auto_promote_date.weekday() >= SAT: # Sat or Sun.
auto_promote_date = _GetNextWeekdayDate(
weekday=MON, min_date=auto_promote_date)
return auto_promote_date
# If we're looking for a stable auto-promotion date but the item is not yet in
# testing, then we need to first figure out when it will go to testing and set
# the previous_track_mtime to that.
if common.TESTING not in applesus_product.tracks:
previous_track_date = GetAutoPromoteDate('testing', applesus_product)
# Promotion to stable should only happen on Wednesdays and only after the grace period.
min_auto_promote_date = previous_track_date + auto_promote_offset
return _GetNextWeekdayDate(
weekday=settings.APPLE_AUTO_PROMOTE_STABLE_WEEKDAY,
min_date=min_auto_promote_date)
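# Illustrative timeline (grace periods and the stable weekday come from
# settings; the values here are hypothetical): a product whose mtime puts it in
# unstable on Friday 2018-03-02 with a 4-day unstable grace period would
# auto-promote to testing on Tuesday 2018-03-06 (a weekday, so no weekend
# adjustment), and with a 7-day testing grace period and a Wednesday stable
# weekday it would reach stable on Wednesday 2018-03-14.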
def _GetNextWeekdayDate(weekday, min_date=None):
"""Returns the date of the current or next weekday on or after min_date.
Args:
weekday: int weekday number, where Monday is 0 and Sunday is 6.
min_date: datetime.date object of the minimum date to find the weekday on
or after. default of None uses today as the minimum date.
Returns:
datetime.date object of the current or next desired weekday.
"""
if min_date is None:
min_date = datetime.datetime.utcnow().date()
next_date = min_date
if min_date.weekday() > weekday:
next_date += datetime.timedelta(7 - min_date.weekday() + weekday)
else:
next_date += datetime.timedelta(weekday - min_date.weekday())
return next_date
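# Illustrative sketch (hypothetical dates, not part of the original module):
# _GetNextWeekdayDate rolls the minimum date forward to the requested weekday,
# wrapping into the following week when that weekday has already passed.
def _GetNextWeekdayDateExample():
  """Returns example dates for a Thursday and a Monday minimum date."""
  thursday = datetime.date(2018, 3, 1)  # 2018-03-01 was a Thursday.
  # Monday has already passed that week, so wrap to Monday 2018-03-05.
  monday = _GetNextWeekdayDate(weekday=MON, min_date=thursday)
  # Wednesday is still ahead in the same week, giving Wednesday 2018-03-07.
  wednesday = _GetNextWeekdayDate(weekday=WED, min_date=monday)
  return monday, wednesday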
|
|
#!/usr/bin/env python
# coding=utf-8
import unittest
from ipapy import invalid_ipa_characters
from ipapy import ipa_substrings
from ipapy import is_valid_ipa
from ipapy import remove_invalid_ipa_characters
from ipapy import split_using_dictionary
class TestInit(unittest.TestCase):
def do_test(self, values, func):
for v, e in values:
self.assertEqual(func(v), e)
def test_ipa_substrings(self):
values = [
(None, None),
(u"", []),
(u"f", [u"f"]),
(u"foo", [u"f", u"o", u"o"]),
(u"\u0066\u02BCoo", [u"\u0066\u02BC", u"o", u"o"]), # single (\u0066 + \u02BC)
(u"f\u031Aoo", [u"f", u"\u031A", u"o", u"o"]),
(u"f\u006e\u0361\u006doo", [u"f", u"\u006e\u0361\u006d", u"o", u"o"]), # single (\u006e + \u0361 + \u006d)
(u"L", [u"L"]),
(u"LfM", [u"L", u"f", u"M"]),
(u"fLoMo", [u"f", u"L", u"o", u"M", u"o"]),
(u"L\u0066\u02BCMoo", [u"L", u"\u0066\u02BC", u"M", u"o", u"o"]),
(u"LfM\u02BCoo", [u"L", u"f", u"M", u"\u02BC", u"o", u"o"]),
(u"fL\u031AMoo", [u"f", u"L", u"\u031A", u"M", u"o", u"o"]),
(u"f\u006eL\u0361\u006doo", [u"f", u"\u006e", u"L", u"\u0361", u"\u006d", u"o", u"o"]),
]
self.do_test(values, ipa_substrings)
def test_ipa_substrings_single(self):
values = [
(None, None),
(u"", []),
(u"f", [u"f"]),
(u"foo", [u"f", u"o", u"o"]),
(u"\u0066\u02BCoo", [u"\u0066", u"\u02BC", u"o", u"o"]), # single (\u0066 + \u02BC)
(u"f\u031Aoo", [u"f", u"\u031A", u"o", u"o"]),
(u"f\u006e\u0361\u006doo", [u"f", u"\u006e", u"\u0361", u"\u006d", u"o", u"o"]), # single (\u006e + \u0361 + \u006d)
(u"L", ["L"]),
(u"LfM", [u"L", u"f", u"M"]),
(u"fLoMo", [u"f", u"L", u"o", u"M", u"o"]),
(u"L\u0066\u02BCMoo", [u"L", u"\u0066", u"\u02BC", u"M", u"o", u"o"]),
(u"LfM\u02BCoo", [u"L", u"f", u"M", u"\u02BC", u"o", u"o"]),
(u"fL\u031AMoo", [u"f", u"L", u"\u031A", u"M", u"o", u"o"]),
(u"f\u006eL\u0361\u006doo", [u"f", u"\u006e", u"L", u"\u0361", u"\u006d", u"o", u"o"]),
]
for v, e in values:
self.assertEqual(ipa_substrings(v, single_char_parsing=True), e)
def test_invalid_ipa_characters(self):
values = [
(None, None),
(u"", set([])),
(u"foo", set([])),
(u"L", set([u"L"])),
(u"LfM", set([u"L", u"M"])),
(u"fLoMo", set([u"L", u"M"])),
(u"L\u0066\u02BCMoo", set([u"L", u"M"])),
(u"LfM\u02BCoo", set([u"L", u"M"])),
(u"fL\u031AMoo", set([u"L", u"M"])),
(u"f\u006eL\u0361\u006doo", set([u"L"])),
]
self.do_test(values, invalid_ipa_characters)
def test_invalid_ipa_characters_indices(self):
values = [
(None, None),
(u"", []),
(u"foo", []),
(u"L", [(0, u"L")]),
(u"LfM", [(0, u"L"), (2, u"M")]),
(u"fLoMo", [(1, u"L"), (3, u"M")]),
(u"L\u0066\u02BCMoo", [(0, u"L"), (3, u"M")]),
(u"LfM\u02BCoo", [(0, u"L"), (2, u"M")]),
(u"fL\u031AMoo", [(1, u"L"), (3, u"M")]),
(u"f\u006eL\u0361\u006doo", [(2, u"L")]),
]
for v, e in values:
self.assertEqual(invalid_ipa_characters(v, indices=True), e)
def test_is_valid_ipa(self):
values = [
(None, None),
(u"", True),
(u"f", True),
(u"foo", True),
(u"\u0066\u02BCoo", True), # single (\u0066 + \u02BC)
(u"f\u031Aoo", True),
(u"f\u006e\u0361\u006doo", True), # single (\u006e + \u0361 + \u006d)
(u"L", False),
(u"LfM", False),
(u"fLoMo", False),
(u"L\u0066\u02BCMoo", False),
(u"LfM\u02BCoo", False),
(u"fL\u031AMoo", False),
(u"f\u006eL\u0361\u006doo", False),
]
self.do_test(values, is_valid_ipa)
def test_remove_invalid_ipa_characters(self):
values = [
(None, None),
(u"", []),
(u"f", [u"f"]),
(u"foo", [u"f", u"o", u"o"]),
(u"\u0066\u02BCoo", [u"\u0066\u02BC", u"o", u"o"]), # single (\u0066 + \u02BC)
(u"f\u031Aoo", [u"f", u"\u031A", u"o", u"o"]),
(u"f\u006e\u0361\u006doo", [u"f", u"\u006e\u0361\u006d", u"o", u"o"]), # single (\u006e + \u0361 + \u006d)
(u"L", []),
(u"LfM", [u"f"]),
(u"fLoMo", [u"f", u"o", u"o"]),
(u"L\u0066\u02BCMoo", [u"\u0066\u02BC", u"o", u"o"]),
(u"LfM\u02BCoo", [u"f", u"\u02BC", u"o", u"o"]),
(u"fL\u031AMoo", [u"f", u"\u031A", u"o", u"o"]),
(u"f\u006eL\u0361\u006doo", [u"f", u"\u006e", u"\u0361", u"\u006d", u"o", u"o"]),
]
self.do_test(values, remove_invalid_ipa_characters)
def test_remove_invalid_ipa_characters_single(self):
values = [
(None, None),
(u"", []),
(u"f", [u"f"]),
(u"foo", [u"f", u"o", u"o"]),
(u"\u0066\u02BCoo", [u"\u0066", u"\u02BC", u"o", u"o"]), # single (\u0066 + \u02BC)
(u"f\u031Aoo", [u"f", u"\u031A", u"o", u"o"]),
(u"f\u006e\u0361\u006doo", [u"f", u"\u006e", u"\u0361", u"\u006d", u"o", u"o"]), # single (\u006e + \u0361 + \u006d)
(u"L", []),
(u"LfM", [u"f"]),
(u"fLoMo", [u"f", u"o", u"o"]),
(u"L\u0066\u02BCMoo", [u"\u0066", u"\u02BC", u"o", u"o"]),
(u"LfM\u02BCoo", [u"f", u"\u02BC", u"o", u"o"]),
(u"fL\u031AMoo", [u"f", u"\u031A", u"o", u"o"]),
(u"f\u006eL\u0361\u006doo", [u"f", u"\u006e", u"\u0361", u"\u006d", u"o", u"o"]),
]
for v, e in values:
self.assertEqual(remove_invalid_ipa_characters(v, single_char_parsing=True), e)
def test_remove_invalid_ipa_characters_invalid(self):
values = [
(None, None),
(u"", ([], [])),
(u"f", ([u"f"], [])),
(u"foo", ([u"f", u"o", u"o"], [])),
(u"\u0066\u02BCoo", ([u"\u0066\u02BC", u"o", u"o"], [])), # single (\u0066 + \u02BC)
(u"f\u031Aoo", ([u"f", u"\u031A", u"o", u"o"], [])),
(u"f\u006e\u0361\u006doo", ([u"f", u"\u006e\u0361\u006d", u"o", u"o"], [])), # single (\u006e + \u0361 + \u006d)
(u"L", ([], [u"L"])),
(u"LfM", ([u"f"], [u"L", u"M"])),
(u"fLoMo", ([u"f", u"o", u"o"], [u"L", u"M"])),
(u"L\u0066\u02BCMoo", ([u"\u0066\u02BC", u"o", u"o"], [u"L", u"M"])),
(u"LfM\u02BCoo", ([u"f", u"\u02BC", u"o", u"o"], [u"L", u"M"])),
(u"fL\u031AMoo", ([u"f", u"\u031A", u"o", u"o"], [u"L", u"M"])),
(u"f\u006eL\u0361\u006doo", ([u"f", u"\u006e", u"\u0361", u"\u006d", u"o", u"o"], [u"L"])),
]
for v, e in values:
self.assertEqual(remove_invalid_ipa_characters(v, return_invalid=True), e)
def test_remove_invalid_ipa_characters_invalid_single(self):
values = [
(None, None),
(u"", ([], [])),
(u"f", ([u"f"], [])),
(u"foo", ([u"f", u"o", u"o"], [])),
(u"\u0066\u02BCoo", ([u"\u0066", u"\u02BC", u"o", u"o"], [])), # single (\u0066 + \u02BC)
(u"f\u031Aoo", ([u"f", u"\u031A", u"o", u"o"], [])),
(u"f\u006e\u0361\u006doo", ([u"f", u"\u006e", u"\u0361", u"\u006d", u"o", u"o"], [])), # single (\u006e + \u0361 + \u006d)
(u"L", ([], [u"L"])),
(u"LfM", ([u"f"], [u"L", u"M"])),
(u"fLoMo", ([u"f", u"o", u"o"], [u"L", u"M"])),
(u"L\u0066\u02BCMoo", ([u"\u0066", u"\u02BC", u"o", u"o"], [u"L", u"M"])),
(u"LfM\u02BCoo", ([u"f", u"\u02BC", u"o", u"o"], [u"L", u"M"])),
(u"fL\u031AMoo", ([u"f", u"\u031A", u"o", u"o"], [u"L", u"M"])),
(u"f\u006eL\u0361\u006doo", ([u"f", u"\u006e", u"\u0361", u"\u006d", u"o", u"o"], [u"L"])),
]
for v, e in values:
self.assertEqual(remove_invalid_ipa_characters(v, return_invalid=True, single_char_parsing=True), e)
def test_split_using_dictionary(self):
d = dict()
d[u"a"] = 1
d[u"ba"] = 2
d[u"b"] = 3
d[u"c"] = 4
d[u"ca"] = 5
values = [
(None, None),
(u"", []),
(u"aza", [u"a", u"z", u"a"]),
(u"aaba", [u"a", u"a", u"ba"]),
(u"acaba", [u"a", u"ca", u"ba"]),
]
for v, e in values:
self.assertEqual(split_using_dictionary(v, d, 2, single_char_parsing=False), e)
def test_split_using_dictionary_single(self):
d = dict()
d[u"a"] = 1
d[u"ba"] = 2
d[u"b"] = 3
d[u"c"] = 4
d[u"ca"] = 5
values = [
(None, None),
(u"", []),
(u"aza", [u"a", u"z", u"a"]),
(u"aaba", [u"a", u"a", u"b", u"a"]),
(u"acaba", [u"a", u"c", u"a", u"b", u"a"]),
]
for v, e in values:
self.assertEqual(split_using_dictionary(v, d, 2, single_char_parsing=True), e)
def test_split_using_dictionary_key_one(self):
d = dict()
d[u"a"] = 1
d[u"b"] = 2
d[u"c"] = 4
values = [
(None, None),
(u"", []),
(u"aza", [u"a", u"z", u"a"]),
(u"aaba", [u"a", u"a", u"b", u"a"]),
(u"acaba", [u"a", u"c", u"a", u"b", u"a"]),
]
for v, e in values:
self.assertEqual(split_using_dictionary(v, d, 1, single_char_parsing=False), e)
def test_split_using_dictionary_key_one_single(self):
d = dict()
d[u"a"] = 1
d[u"b"] = 2
d[u"c"] = 4
values = [
(None, None),
(u"", []),
(u"aza", [u"a", u"z", u"a"]),
(u"aaba", [u"a", u"a", u"b", u"a"]),
(u"acaba", [u"a", u"c", u"a", u"b", u"a"]),
]
for v, e in values:
self.assertEqual(split_using_dictionary(v, d, 1, single_char_parsing=True), e)
|
|
"""The following methods may be used to describe the fit between the model
simulation and the observations.
.. currentmodule:: pastas.modelstats.Statistics
.. autosummary::
:nosignatures:
:toctree: ./generated
summary
Examples
========
These methods may be used as follows.
>>> ml.stats.summary(stats=["rmse", "mae", "nse"])
Value
Statistic
rmse 0.114364
mae 0.089956
nse 0.929136
"""
from numpy import nan
from pandas import DataFrame
from .decorators import model_tmin_tmax
from .stats import diagnostics, metrics
class Statistics:
# Save all statistics that can be calculated.
ops = ["rmse", "rmsn", "sse", "mae", "nse", "evp", "rsq", "bic", "aic", ]
def __init__(self, ml):
"""This class provides statistics to to pastas Model class.
Parameters
----------
ml: Pastas.model.Model
ml is a time series Model that is calibrated.
Notes
-----
To obtain a list of all statistics that are included type:
>>> print(ml.stats.ops)
"""
# Save a reference to the model.
self.ml = ml
def __repr__(self):
msg = """This module contains all the statistical functions that are
included in Pastas. To obtain a list of all statistics that are included type:
>>> print(ml.stats.ops)"""
return msg
@model_tmin_tmax
def rmse(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""Root mean squared error of the residuals.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
See Also
--------
pastas.stats.rmse
"""
res = self.ml.residuals(tmin=tmin, tmax=tmax)
return metrics.rmse(res=res, weighted=weighted, **kwargs)
@model_tmin_tmax
def rmsn(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""Root mean squared error of the noise.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
Returns
-------
float or nan
Return a float if noisemodel is present, nan if not.
See Also
--------
pastas.stats.rmse
"""
if not self.ml.settings["noise"]:
return nan
else:
res = self.ml.noise(tmin=tmin, tmax=tmax)
return metrics.rmse(res=res, weighted=weighted, **kwargs)
@model_tmin_tmax
def sse(self, tmin=None, tmax=None):
"""Sum of the squares of the error (SSE)
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
See Also
--------
pastas.stats.sse
"""
res = self.ml.residuals(tmin=tmin, tmax=tmax)
return metrics.sse(res=res)
@model_tmin_tmax
def mae(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""Mean Absolute Error (MAE) of the residuals.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
See Also
--------
pastas.stats.mae
"""
res = self.ml.residuals(tmin=tmin, tmax=tmax)
return metrics.mae(res=res, weighted=weighted, **kwargs)
@model_tmin_tmax
def nse(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""Nash-Sutcliffe coefficient for model fit .
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
See Also
--------
pastas.stats.nse
"""
res = self.ml.residuals(tmin=tmin, tmax=tmax)
obs = self.ml.observations(tmin=tmin, tmax=tmax)
return metrics.nse(obs=obs, res=res, weighted=weighted, **kwargs)
@model_tmin_tmax
def pearsonr(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""Compute the (weighted) Pearson correlation (r).
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
See Also
--------
pastas.stats.pearsonr
"""
obs = self.ml.observations(tmin=tmin, tmax=tmax)
sim = self.ml.simulate(tmin=tmin, tmax=tmax)
return metrics.pearsonr(obs=obs, sim=sim, weighted=weighted, **kwargs)
@model_tmin_tmax
def evp(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""Explained variance percentage.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
See Also
--------
pastas.stats.evp
"""
res = self.ml.residuals(tmin=tmin, tmax=tmax)
obs = self.ml.observations(tmin=tmin, tmax=tmax)
return metrics.evp(obs=obs, res=res, weighted=weighted, **kwargs)
@model_tmin_tmax
def rsq(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""R-squared.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
See Also
--------
pastas.stats.rsq
"""
obs = self.ml.observations(tmin=tmin, tmax=tmax)
res = self.ml.residuals(tmin=tmin, tmax=tmax)
return metrics.rsq(obs=obs, res=res, weighted=weighted, **kwargs)
@model_tmin_tmax
def kge_2012(self, tmin=None, tmax=None, weighted=False, **kwargs):
"""Kling-Gupta Efficiency.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
weighted: bool, optional
If weighted is True, the variances are computed using the time
step between observations as weights. Default is False.
See Also
--------
pastas.stats.kge_2012
"""
sim = self.ml.simulate(tmin=tmin, tmax=tmax)
obs = self.ml.observations(tmin=tmin, tmax=tmax)
return metrics.kge_2012(obs=obs, sim=sim, weighted=weighted, **kwargs)
@model_tmin_tmax
def bic(self, tmin=None, tmax=None):
"""Bayesian Information Criterium (BIC).
Parameters
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
See Also
--------
pastas.stats.bic
"""
nparam = self.ml.parameters["vary"].sum()
if self.ml.settings["noise"]:
res = (self.ml.noise(tmin=tmin, tmax=tmax) *
self.ml.noise_weights(tmin=tmin, tmax=tmax))
else:
res = self.ml.residuals(tmin=tmin, tmax=tmax)
return metrics.bic(res=res, nparam=nparam)
@model_tmin_tmax
def aic(self, tmin=None, tmax=None):
"""Akaike Information Criterium (AIC).
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
See Also
--------
pastas.stats.aic
"""
nparam = self.ml.parameters["vary"].sum()
if self.ml.settings["noise"]:
res = (self.ml.noise(tmin=tmin, tmax=tmax) *
self.ml.noise_weights(tmin=tmin, tmax=tmax))
else:
res = self.ml.residuals(tmin=tmin, tmax=tmax)
return metrics.aic(res=res, nparam=nparam)
@model_tmin_tmax
def summary(self, tmin=None, tmax=None, stats=None):
"""Returns a Pandas DataFrame with goodness-of-fit metrics.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
stats: list, optional
list of statistics that need to be calculated. If nothing is
provided, all statistics are returned.
Returns
-------
stats : Pandas.DataFrame
single-column DataFrame with calculated statistics
Examples
--------
>>> ml.stats.summary()
or
>>> ml.stats.summary(stats=["mae", "rmse"])
"""
if stats is None:
stats_to_compute = self.ops
else:
stats_to_compute = stats
stats = DataFrame(columns=['Value'])
for k in stats_to_compute:
stats.loc[k] = (getattr(self, k)(tmin=tmin, tmax=tmax))
stats.index.name = 'Statistic'
return stats
@model_tmin_tmax
def diagnostics(self, tmin=None, tmax=None, alpha=0.05, stats=(),
float_fmt="{0:.2f}"):
if self.ml.noisemodel and self.ml.settings["noise"]:
series = self.ml.noise(tmin=tmin, tmax=tmax)
nparam = self.ml.noisemodel.nparam
else:
series = self.ml.residuals(tmin=tmin, tmax=tmax)
nparam = 0
return diagnostics(series=series, alpha=alpha, nparam=nparam,
stats=stats, float_fmt=float_fmt)
|
|
#!/usr/bin/env python
"""
For a VEGAS'd VCF file, calculate a distance score for each motif delta score, ChIP z-score, and gene expression
z-score set for each variant. These values will be plotted in 3 dimensional space. Additionally, simple bed-like files
are provided as output. One contains the scores for all motif, ChIP z-score, and gene expression z-score sets for all
variants. An optional second contains only those sets that meet the distance score threshold as defined by the user.
A third will report the sets for the top 100 distance scores.
Usage: summarize.py -i <input.vcf> -o <output> [OPTIONS]
Args:
-i (str): Path to sorted variant file to process.
-o (str): Prefix for output files.
-d (float, optional): Distance magnitude threshold that must be met for variants/genes to be reported to output.
Default is 0, so all variant-sample activity-gene set distances will be reported.
"""
import argparse
import time
import pandas as pd
from math import sqrt
import plotly
import plotly.graph_objs as go
import numpy as np
from utils import Position, timeString
# YYY-JA 04/24/2017 - Hate making yet another variant (Ha) of this class.
# Move to utils.py and make uniform across modules.
class Variant(object):
"""
Use to process and handle variant records from a VCF more easily. Create from line of VCF file.
"""
def __init__(self, line):
self.line_list = line.strip().split("\t")
self.pos = Position(self.line_list[0], int(self.line_list[1]),
(int(self.line_list[1]) + len(self.line_list[3])))
self.ref_allele = self.line_list[3]
self.var_allele = self.line_list[4]
self.iden = self.line_list[2]
self.orig_line = line.strip()
self.info_fields = self.line_list[7].split(";")
(self.common_samples, self.motif_fields, self.exp_fields, self.act_fields,
self.genes) = self.parse_info_fields()
self.motif_scores = self.get_motif_scores() # Calculate delta motif scores and place into a dict.
self.sample_data = self.get_sample_data() # Parse all combos of gene expression and loci activity data.
self.output = self.get_variant_summary() # Get output lines as a list of lists.
if self.common_samples is not None: # Should never evaluate to False.
self.num_com_samps = len(self.common_samples)
else:
self.num_com_samps = 0
def parse_info_fields(self):
"""
Get names of samples containing variant and motif INFO fields from a variant record's INFO fields.
Args:
self (Variant): Variant object.
Returns:
common_samples (dict of tuple): Samples that had the variant called and have both loci activity and expression data, mapped to (expression_index, activity_index).
motif_fields (list of str): List of INFO fields for variant that contain MOTIF related information.
exp_fields (list of str): List of INFO fields for variant that contain Expression related information.
act_fields (list of str): List of INFO fields for variant that contain Activity/Loci related information.
genes (list of str): List of gene names from the GENE INFO field.
"""
act_samples = None
exp_samples = None
common_samples = []
motif_fields = []
exp_fields = []
act_fields = []
genes = None
for field in self.info_fields:
if field != "INDEL": # Take care of INDEL flag.
field_info = field.split("=")
# YYY-JA - This is a hack around a bug that's messing up the MOTIFN field in tf_expression.py.
# Go back and actually figure out why the MOTIFN field is getting split up sometimes.
try:
name, data = (field_info[0], field_info[1])
except:
name, data = "BROKEN", None
else:
name, data = "INDEL", None
# Break up info fields.
# YYY-JA 04/25/2017 - could easily be simplified by changing output fields to something more standardized.
if name.startswith("MOTIF"):
motif_fields.append(field)
elif name.startswith("EXP"):
exp_fields.append(field)
# Get variant samples with expression data.
if name == "EXPV":
exp_samples = data.split(",")
elif name.startswith("GENE"):
genes = data.split(',')
elif name.startswith("LOCI") or name.startswith("SAMP"):
act_fields.append(field)
# Get variant samples with locus activity data.
if name == "SAMPSV":
act_samples = data.split(",")
common_samples = compare_samples(exp_samples, act_samples)
return (common_samples, motif_fields, exp_fields, act_fields, genes)
def get_motif_scores(self):
"""
Returns the difference between reference and variant scores for each motif the variant matches
as a dictionary of {motif_name: (variant - reference log likelihood ratios)}.
Returns:
motifs (dict of floats): {motif_name: (variant - reference log likelihood ratios)}
"""
# List of lists [[field_name1, data1], [field_name2, data2]...]
motif_fields = [x.split("=") for x in self.motif_fields]
motifs = {}
for x in motif_fields:
# Get motif names and var/ref scores.
if x[0] == "MOTIFN":
names = x[1].split(",")
elif x[0] == "MOTIFV":
var_scores = x[1].split(",")
elif x[0] == "MOTIFR":
ref_scores = x[1].split(",")
for i in names:
idx = names.index(i)
diff = float(var_scores[idx]) - float(ref_scores[idx])
motifs[i] = diff
return motifs
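# Illustrative sketch (hypothetical INFO values): with MOTIFN=CTCF,GATA1,
# MOTIFV=9.0,3.5 and MOTIFR=7.5,4.0 in the motif fields, get_motif_scores()
# returns {'CTCF': 1.5, 'GATA1': -0.5}, i.e. the variant minus reference
# log-likelihood ratio for each matched motif.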
def get_sample_data(self):
"""
Parses and returns the gene expression and loci activity z-scores for variant samples.
Returns:
sample_data (list of lists of str): [[sample, loci1, sample act_z_score, gene1, sample exp_z_score],
[sample, loci1, sample act_z_score, gene2, sample exp_z_score]...]
"""
samples = self.common_samples
act_data = [x.split("=") for x in self.act_fields]
exp_data = [x.split("=") for x in self.exp_fields]
genes = self.genes
sample_data = []
for x in act_data:
# Get loci id.
if x[0] == "LOCIID":
loci = x[1].split(",")
# Get each set of z-scores for each loci.
elif x[0] == "LOCIVZ":
act_z_scores = [x.strip("(").strip(")").split(',') for x in x[1].split("),(")]
# Create dict for loci - {loci_id: [act_z_scores]}
loci_data = {k: v for k, v in zip(loci, act_z_scores)}
for x in exp_data:
# Get each set of z-scores for each gene.
if x[0] == "EXPVZ":
gene_z_scores = [x.strip("(").strip(")").split(',') for x in x[1].split("),(")]
# Create dict for genes - {gene: [gene_z_scores]}
gene_data = {k: v for k, v in zip(genes, gene_z_scores)}
# Create list of lists containing all sets of relevant data combinations for all samples with a variant.
# [[sample, loci1, sample act_z_score, gene1, sample exp_z_score],
# [sample, loci1, sample act_z_score, gene2, sample exp_z_score]...]
for i in samples: # First iterate through all samples and get their expression and activity indices.
e_idx = samples[i][0]
a_idx = samples[i][1]
for l in loci_data: # Iterate through each locus for given sample.
# print(loci_data[l], str(a_idx), sep="\t")
samp_act_z = loci_data[l][a_idx]
for g in gene_data: # And now each gene for each locus.
samp_exp_z = gene_data[g][e_idx]
sample_data.append([i, l, samp_act_z, g, samp_exp_z])
return sample_data
def get_variant_summary(self):
"""
Creates a list of summary output fields for a given Variant as well as its distance metrics for plotting.
Returns:
output_fields (list of lists of str): List of lists of output fields for the Variant.
"""
var_info = [self.pos.chrom, self.pos.start, self.ref_allele, self.var_allele]
motif_info = self.motif_scores
sample_info = self.sample_data
output_fields = []
for m in motif_info:
m_score = motif_info[m] # Motif delta score for var vs ref.
for s in sample_info:
dist_metrics = [(float(m_score)), float(s[2]), float(s[4])] # To be used for plotting later.
dist_score = calc_distance(dist_metrics)
# Create complete list of output fields.
output_fields.append(var_info + [m] + [m_score] + [s[0]] + [s[1]] + [s[2]] + [s[3]] +
[s[4]] + [dist_score])
return (output_fields)
def compare_samples(exp_samples, act_samples):
"""
Compare gene expression and activity samples with variant and return common samples as a dict
of format {sample_name: (expression_index, activity_index)} so expression and activity
z-scores can be found appropriately.
Args:
exp_samples (list of str): List of variant samples with expression data.
act_samples (list of str): List of variant samples with activity data.
Returns:
common_samps (dict of tuple): {sample_name: (expression_index, activity_index)}
"""
common_samps = {}
samps = list(set(exp_samples) & set(act_samples)) # Get list of common samples.
# Create new dict for common samples with indexes for sample positions in terms of expression/activity z-scores.
for x in samps:
exp_idx = exp_samples.index(x)
act_idx = act_samples.index(x)
common_samps[x] = (exp_idx, act_idx)
return common_samps
def calc_distance(score_array):
"""
Returns the Euclidean distance for a motif log-odds ratio, activity z-score, and gene expression z-score set.
"""
return sqrt((score_array[0] ** 2) + (score_array[1] ** 2) + (score_array[2] ** 2))
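# Illustrative sketch (hypothetical sample names and scores, not pipeline
# output): shows how shared samples are indexed and how the raw distance is
# computed before any scaling.
def _distance_example():
    """Return the common-sample index map for two made-up sample lists."""
    # Samples present in both lists map to (expression_index, activity_index)
    # so their z-scores can be looked up later in get_sample_data().
    shared = compare_samples(['s1', 's2', 's3'], ['s2', 's3', 's4'])
    assert shared == {'s2': (1, 0), 's3': (2, 1)}
    # A motif delta of 3, activity z-score of 4 and expression z-score of 0
    # lie at Euclidean distance 5 from the origin.
    assert calc_distance([3.0, 4.0, 0.0]) == 5.0
    return shared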
def plot_distances(df, out_prefix):
"""
Plot distance scores calculated for the delta var/ref motif log-odds ratios, activity z-score, and gene expression
z-score sets for each variant.
Args:
df (pandas Dataframe) = Dataframe containing all variant, distance metric, and sample info. One record per row.
out_prefix (str) = Prefix to use for plot outputs.
"""
# Take top 30k hits only; plotly can't really handle more for 3D plots.
if len(df) > 30000:
df = df.head(30000)
info = list(zip(df.SAMPLE, df.MOTIF, df.GENE))
info_list = ["Sample: " + x[0] + ", Motif: " + x[1] + ", Gene: " + x[2] for x in info]
trace1 = go.Scatter3d(
name="Distances",
x=df['VAR-REF_SCORE'],
y=df.ACT_ZSCORE,
z=df.EXP_ZSCORE,
hoverinfo="x+y+z+text",
text=info_list,
mode='markers',
marker=dict(
size=4, # Using gene expression as color scale values for now.
color=df.SCALED_DISTANCE, # set color to an array/list of desired values
colorscale='Viridis', # choose a colorscale
opacity=0.8
)
)
data = [trace1]
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=100
),
title='Distances from Average for Individual Variant Events',
scene=dict(
xaxis=dict(
title='Var/Ref Motif Log-Odds Ratio Difference',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='Enhancer Activity z-Score',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
zaxis=dict(
title='Gene Expression z-Score',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
)
fig = go.Figure(data=data, layout=layout)
# py.image.save_as(fig, filename=out_prefix + '.png')
plotly.offline.plot(fig, filename=out_prefix + '.html', auto_open=False, image="png", image_filename=out_prefix)
def scale_and_frame(all_output):
"""
Return dataframe after adding a scaled distance score to each row. Scaling is done by dividing each squared
metric by its maximum squared value, summing, and taking the square root. This makes things much easier to plot and handle.
Args:
all_output (list of lists): Each list contains a set of motif, expression, activity data.
[sample, loci1, sample act_z_score, gene1, sample exp_z_score, distance]
Returns:
df (pandas Dataframe): Each row contains a set of motif, expression, activity data.
[sample, loci1, sample act_z_score, gene1, sample exp_z_score, distance, scaled_distance]
"""
df = pd.DataFrame(all_output, columns=['CHR', 'POS', 'REF', 'ALT', 'MOTIF', 'VAR-REF_SCORE', 'SAMPLE', 'LOCIID',
'ACT_ZSCORE', 'GENE', 'EXP_ZSCORE', 'DISTANCE'])
df[['VAR-REF_SCORE', 'ACT_ZSCORE', 'EXP_ZSCORE', 'DISTANCE']] = df[['VAR-REF_SCORE', 'ACT_ZSCORE', 'EXP_ZSCORE',
'DISTANCE']].apply(pd.to_numeric)
print(df.head(10))
# Get scaling factors.
motif_score_scale = max(df['VAR-REF_SCORE'] ** 2)
act_score_scale = max(df.ACT_ZSCORE ** 2)
gene_score_scale = max(df.EXP_ZSCORE ** 2)
# Scale the distance and create new column.
df['SCALED_DISTANCE'] = np.sqrt(((df['VAR-REF_SCORE'] ** 2) / motif_score_scale) +
((df.ACT_ZSCORE ** 2) / act_score_scale) +
((df.EXP_ZSCORE ** 2) / gene_score_scale))
# Sort by distance.
df.sort_values('SCALED_DISTANCE', ascending=False, inplace=True)
return df
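# Illustrative sketch (two hypothetical rows, not pipeline output): each squared
# metric is divided by its column-wide maximum before summing, so a row that is
# the most extreme in all three metrics gets a SCALED_DISTANCE of sqrt(3).
def _scale_and_frame_example():
    """Return a scaled DataFrame built from two made-up output rows."""
    rows = [
        ['chr1', 100, 'A', 'G', 'CTCF', 2.0, 's1', 'loci1', 4.0, 'GENE1', 1.0,
         calc_distance([2.0, 4.0, 1.0])],
        ['chr1', 200, 'C', 'T', 'GATA1', 1.0, 's2', 'loci2', 2.0, 'GENE2', 0.5,
         calc_distance([1.0, 2.0, 0.5])],
    ]
    # The first row maxes out every metric, so its SCALED_DISTANCE is
    # sqrt(1 + 1 + 1) ~= 1.73; the second row scores sqrt(0.75) ~= 0.87.
    return scale_and_frame(rows)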
def main(vcf_file, out_prefix, d_thresh):
"""
Args:
vcf_file (str): Path to sorted variant file to process.
out_prefix (str): Prefix to be used for output files.
d_thresh (float): Distance threshold to be used for more restricted plotting and reporting.
"""
with open(vcf_file) as f:
line = f.readline().strip()
now = time.strftime("%c")
command = ('##venusar=<ID=summary,Date="' + now + '",CommandLineOptions="--input ' + vcf_file +
' --output ' + out_prefix + ' --dthresh ' + str(d_thresh) + '">')
print(command)
full_out_file = open(out_prefix + "_full.txt", "w") # Full output.
top_out_file = open(out_prefix + "_top100.txt", "w") # Top 100 hits.
if d_thresh != 0:
rest_out_file = open(out_prefix + "_restricted.txt", "w") # Restricted by distance threshold.
all_output = []
# Skip the VCF header lines.
while line.startswith("#"):
line = f.readline().strip()
print("Reading and processing input file.")
for line in f:
current_var = Variant(line) # Most processing happening here.
# Piece out full hits, restricted hits, top hits for everything.
full_var_output = current_var.output
for x in full_var_output:
all_output.append(x)
# Scale distance metrics.
print("Calculating distance metrics.")
scaled_df = scale_and_frame(all_output)
print("Creating output files and plots.")
scaled_df.to_csv(full_out_file, sep="\t", index=False)
if d_thresh != 0:
restricted_scaled_df = scaled_df[scaled_df.SCALED_DISTANCE > d_thresh]
restricted_scaled_df.to_csv(rest_out_file, sep="\t", index=False)
plot_distances(restricted_scaled_df, out_prefix + "_restricted")
# Get top 100 hits by distance.
top100_df = scaled_df.head(100)
top100_df.to_csv(top_out_file, sep="\t", index=False)
# Plotting - only plots top 30k hits as browsers can't handle more.
plot_distances(scaled_df, out_prefix + "_full")
plot_distances(top100_df, out_prefix + "_top")
print("Complete at: " + timeString() + ".")
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument("-i", "--input", dest="inp_file", required=True)
parser.add_argument("-o", "--output", dest="out_pre", required=True)
parser.add_argument("-d", "--dthresh", dest="d_thresh", required=False, default=0, type=float)
args = parser.parse_args()
main(args.inp_file, args.out_pre, args.d_thresh)
|
|
"""
Centralized database access tests for the American Gut web portal
"""
__author__ = "Emily TerAvest"
__copyright__ = "Copyright 2014, American Gut Project"
__credits__ = ["Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.0.0.dev"
__maintainer__ = ["Emily TerAvest"]
__email__ = "emily.teravest@colorado.edu"
__status__ = "Production"
from unittest import TestCase, main
from datetime import datetime
import psycopg2
import psycopg2.extras
from passlib.hash import bcrypt
from amgut.lib.data_access.ag_data_access import AGDataAccess
from amgut.lib.config_manager import AMGUT_CONFIG
from amgut.lib.util import ag_test_checker
@ag_test_checker()
class TestAGDataAccess(TestCase):
def setUp(self):
self.con = psycopg2.connect(user=AMGUT_CONFIG.user,
password=AMGUT_CONFIG.password,
database=AMGUT_CONFIG.database,
host=AMGUT_CONFIG.host,
port=AMGUT_CONFIG.port)
self.data_access = AGDataAccess(self.con)
self.data_access.ag_update_kit_password('test',
AMGUT_CONFIG.badpassword)
self.con.commit()
def tearDown(self):
self.data_access.ag_update_kit_password('test',
AMGUT_CONFIG.goodpassword)
self.con.close()
def test_authenticateWebAppUser(self):
self.assertFalse(self.data_access.authenticateWebAppUser('bad',
'wrong'))
data = self.data_access.authenticateWebAppUser(
'test', AMGUT_CONFIG.badpassword)
self.assertEqual(data['email'], 'test@microbio.me')
def test_addAGLogin(self):
self.data_access.addAGLogin('deleteme@no.no', 'test', 'test',
'test', 'CO', '80303', 'USA')
cur = self.con.cursor()
cur.execute(
'select * from ag_login where email = %s', ('deleteme@no.no',))
rec = cur.fetchall()
self.assertEqual(len(rec), 1)
cur.execute('delete from ag_login where email = %s',
('deleteme@no.no',))
self.con.commit()
cur.execute(
'select * from ag_login where email = %s', ('deleteme@no.no',))
rec = cur.fetchall()
self.assertEqual(len(rec), 0)
def test_updateAGLogin(self):
self.data_access.updateAGLogin('d8592c74-7da1-2135-e040-8a80115d6401',
'changed@changed.com', '', 'add',
'city', 'state', 'zip', 'USA')
cur = self.con.cursor()
cur.execute('select * from ag_login where ag_login_id = %s',
('d8592c74-7da1-2135-e040-8a80115d6401',))
rec = cur.fetchone()
self.assertEqual(rec[1], 'changed@changed.com')
self.data_access.updateAGLogin('d8592c74-7da1-2135-e040-8a80115d6401',
'test@microbio.me', 'Test', 'Test',
'Boulder', 'CO', '80303',
'United States')
cur.execute('select * from ag_login where ag_login_id = %s',
('d8592c74-7da1-2135-e040-8a80115d6401',))
rec = cur.fetchone()
self.assertEqual(rec[1], 'test@microbio.me')
def test_getAGKitsByLogin(self):
data = self.data_access.getAGKitsByLogin()
self.assertTrue({'email': 'test@microbio.me',
'supplied_kit_id': 'test',
'ag_kit_id': 'd8592c74-7da2-2135-e040-8a80115d6401'}
in data)
def test_getAGBarcodeDetails(self):
data = self.data_access.getAGBarcodeDetails('000000001')
self.assertEqual(data['participant_name'], 'foo')
self.assertEqual(data['site_sampled'], 'Stool')
self.assertEqual(data['status'], 'Received')
def test_getAGKitDetails(self):
data = self.data_access.getAGKitDetails('test')
self.assertEqual(data['kit_verification_code'], 'test')
def test_getNextAGBarcode(self):
barcode, barcode_text = self.data_access.getNextAGBarcode()
data = self.data_access.getAGBarcodes()
self.assertTrue(barcode_text not in data)
def test_updateAGKit(self):
self.data_access.updateAGKit('d8592c74-7da2-2135-e040-8a80115d6401',
'test22', 'newpass', 24, 'ver')
cur = self.con.cursor()
cur.execute('select * from ag_kit where ag_kit_id = %s',
('d8592c74-7da2-2135-e040-8a80115d6401',))
rec = cur.fetchone()
self.assertEqual(rec[2], 'test22')
self.data_access.updateAGKit('d8592c74-7da2-2135-e040-8a80115d6401',
'test', 'oldpass', 1, 'test')
cur.execute('select * from ag_kit where ag_kit_id = %s',
('d8592c74-7da2-2135-e040-8a80115d6401',))
rec = cur.fetchone()
self.assertEqual(rec[2], 'test')
def test_updateAGBarcode(self):
self.data_access.updateAGBarcode(
'000010860', 'd8592c74-7da2-2135-e040-8a80115d6401', 'Stool', '',
'07/30/2014', '9:30 AM', 'test', 'notes', 'n', 'n')
cur = self.con.cursor()
cur.execute('select * from ag_kit_barcodes where barcode = %s',
('000010860',))
rec = cur.fetchone()
self.assertEqual(rec[6], 'Stool')
self.assertEqual(rec[7], '07/30/2014')
self.data_access.updateAGBarcode(
'000010860', 'd8592c74-7da2-2135-e040-8a80115d6401', '', '', '',
'', '', '', '', '')
cur.execute('select * from ag_kit_barcodes where barcode = %s',
('000010860',))
rec = cur.fetchone()
self.assertEqual(rec[6], '')
self.assertEqual(rec[7], '')
def test_registerHandoutKit(self):
ag_login_id = 'd8592c74-7da1-2135-e040-8a80115d6401'
self.data_access.registerHandoutKit(
ag_login_id, 'test_ha')
cur = self.con.cursor()
# make sure handout kit removed
cur.execute("SELECT * FROM ag_handout_kits")
obs = cur.fetchall()
self.assertEqual(obs, [])
# make sure handout kit registered as regular kit
cur.execute("SELECT * FROM ag_kit WHERE supplied_kit_id = 'test_ha'",
[ag_login_id])
obs = cur.fetchall()
exp = [('a70a398c-a29e-4367-8ae2-f291d5217b29',
'd8592c74-7da1-2135-e040-8a80115d6401',
'test_ha', '1234', 3, '5678', 'n', 'n', None, None, 'n', None)]
self.assertEqual(obs, exp)
kit_id = obs[0][0]
# make sure barcodes registered
cur.execute("SELECT * FROM barcode JOIN ag_kit_barcodes USING "
"(barcode) WHERE ag_kit_id = %s",
[kit_id])
obs = cur.fetchall()
exp = [
('000000004', datetime(2015, 4, 29, 9, 25, 51, 842222), None, None,
None, None, None, None, 'f3033ee2-391c-4f24-b0eb-f9ed1d26444a',
'a70a398c-a29e-4367-8ae2-f291d5217b29', None, '000000004.jpg',
None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None),
('000000003', datetime(2015, 4, 29, 9, 25, 51, 842222), None, None,
None, None, None, None, '8e3b037e-fc79-4523-9816-dc7e9d250ebb',
'a70a398c-a29e-4367-8ae2-f291d5217b29', None, '000000003.jpg',
None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None),
('000000002', datetime(2015, 4, 29, 9, 25, 51, 842222), None, None,
None, None, None, None, 'f98bf005-d308-4994-a1fd-9826ab3dbb9d',
'a70a398c-a29e-4367-8ae2-f291d5217b29', None, '000000002.jpg',
None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None)]
self.assertItemsEqual(obs, exp)
def test_deleteAGParticipantSurvey(self):
cur = self.con.cursor()
cur.execute('insert into ag_consent (ag_login_id, '
'participant_name, participant_email) values (%s, %s, %s)',
('d8592c747da12135e0408a80115d6401', 'sp_test',
'foo@bar.com'))
self.con.commit()
cur.execute('insert into ag_login_surveys (ag_login_id, survey_id,'
' participant_name) values (%s, %s, %s)',
('d8592c74-7da1-2135-e040-8a80115d6401', '1235',
'sp_test'))
self.con.commit()
self.data_access.deleteAGParticipantSurvey(
'd8592c747da12135e0408a80115d6401', 'sp_test')
data = self.data_access.getHumanParticipants(
'd8592c747da12135e0408a80115d6401')
self.assertEqual(len(data), 1)
def test_logParticipantSample(self):
self.data_access.logParticipantSample(
'd8592c74-7da1-2135-e040-8a80115d6401', '000010860', 'Stool', '',
'07/29/2014', '09:30 AM', 'foo', 'no notes')
data = self.data_access.getAGBarcodeDetails('000010860')
self.assertEqual(data['participant_name'], 'foo')
self.assertEqual(data['site_sampled'], 'Stool')
self.assertEqual(data['sample_date'], '07/29/2014')
self.data_access.deleteSample('000010860',
'd8592c747da12135e0408a80115d6401')
data = self.data_access.getAGBarcodeDetails('000010860')
self.assertEqual(data['participant_name'], None)
def test_deleteSample(self):
cur = self.con.cursor()
cur.execute('update ag_kit_barcodes set site_sampled = %s,'
'sample_date = %s, participant_name = %s, '
'sample_time = %s where barcode = %s', ('Stool',
'07/30/2014',
'test', '9:30 AM',
'000010860'))
self.con.commit()
data = self.data_access.getAGBarcodeDetails('000010860')
self.assertEqual(data['site_sampled'], 'Stool')
self.data_access.deleteSample('000010860',
'd8592c747da12135e0408a80115d6401')
data = self.data_access.getAGBarcodeDetails('000010860')
self.assertEqual(data['participant_name'], None)
def test_getHumanParticipants(self):
data = self.data_access.getHumanParticipants(
'd8592c74-7da1-2135-e040-8a80115d6401')
self.assertEqual(set(data), {'foo'})
def test_getAnimalParticipants(self):
data = self.data_access.getAnimalParticipants(
'd8592c74-7da1-2135-e040-8a80115d6401')
# this test needs to be updated when the test database is updated
self.assertEqual(len(data), 0)
def test_getParticipantSamples(self):
data = self.data_access.getParticipantSamples(
'd8592c74-7da1-2135-e040-8a80115d6401', 'foo')
self.assertEqual(data[0]['status'], 'Received')
self.assertEqual(data[0]['barcode'], '000000001')
def test_getEnvironmentalSamples(self):
data = self.data_access.getEnvironmentalSamples(
'd8592c74-7da1-2135-e040-8a80115d6401')
barcodes = {x['barcode'] for x in data}
# TODO: This is broken -- there are no environmental samples associated
# with the test user. We need to set something up like we have in qiita
# where a test DB is set up and torn down for each individual test
self.assertEqual(barcodes, set())
def test_getAvailableBarcodes(self):
data = self.data_access.getAvailableBarcodes(
'd8592c74-7da1-2135-e040-8a80115d6401')
self.assertEqual(len(data), 1)
def test_verifyKit(self):
cur = self.con.cursor()
cur.execute(' update ag_kit set kit_verified = %s where '
'supplied_kit_id = %s', ('n', 'test',))
self.con.commit()
self.data_access.verifyKit('test')
cur.execute('select kit_verified from ag_kit where supplied_kit_id = '
'%s', ('test',))
rec = cur.fetchone()
self.assertEqual(rec[0], 'y')
def test_getMapMarkers(self):
data = self.data_access.getMapMarkers()
self.assertNotEqual(len(data), 0)
def test_handoutCheck(self):
is_handout = self.data_access.handoutCheck('test', 'wrongpass')
self.assertFalse(is_handout)
def test_getAGStats(self):
data = self.data_access.getAGStats()
self.assertEqual(len(data), 23)
def test_updateAKB(self):
self.data_access.updateAKB('000010860', 'n', 'n', 'y',
'some other text', '07/30/2014')
cur = self.con.cursor()
cur.execute('select * from ag_kit_barcodes where barcode = %s',
('000010860',))
rec = cur.fetchone()
self.assertEqual([rec[12], rec[13], rec[14]], ['n', 'n', 'y'])
self.data_access.updateAKB('000010860', None, None, None, None, None)
cur.execute('select * from ag_kit_barcodes where barcode = %s',
('000010860',))
rec = cur.fetchone()
self.assertEqual([rec[12], rec[13], rec[14]], [None, None, None])
def test_getAGKitIDsByEmail(self):
data = self.data_access.getAGKitIDsByEmail('test@microbio.me')
self.assertEqual(set(data), {'test', '1111'})
def test_ag_set_pass_change_code(self):
self.data_access.ag_set_pass_change_code('test@microbio.me', 'test',
'123456789')
cur = self.con.cursor()
cur.execute('select pass_reset_code from ag_kit where '
'supplied_kit_id = %s', ('test',))
rec = cur.fetchone()
self.assertEqual(rec[0], '123456789')
cur.execute('update ag_kit set pass_reset_code = %s, '
'pass_reset_time = %s where supplied_kit_id = %s',
('', None, 'test',))
self.con.commit()
def test_ag_update_kit_password(self):
self.data_access.ag_update_kit_password('test', 'newpass')
cur = self.con.cursor()
cur.execute('select kit_password from ag_kit where supplied_kit_id = '
'%s', ('test',))
rec = cur.fetchone()
self.assertTrue(bcrypt.verify('newpass', rec[0]))
def test_ag_verify_kit_password_change_code(self):
self.data_access.ag_set_pass_change_code('test@microbio.me', 'test',
'123456789')
result = self.data_access.ag_verify_kit_password_change_code(
'test@microbio.me', 'test', '123456789')
self.assertEqual(result, 1)
cur = self.con.cursor()
cur.execute('update ag_kit set pass_reset_code = %s, '
'pass_reset_time = %s where supplied_kit_id = %s',
('', None, 'test',))
self.con.commit()
def test_getBarcodesByKit(self):
observed = self.data_access.getBarcodesByKit('test')
expected = {'000010860', '000006616', '000000001'}
self.assertEqual(set(observed), expected)
def test_checkPrintResults(self):
data = self.data_access.checkPrintResults('test')
self.assertTrue(data is None)
def test_get_user_for_kit(self):
data = self.data_access.get_user_for_kit('test')
self.assertEqual(data, 'd8592c74-7da1-2135-e040-8a80115d6401')
def test_menu_items(self):
data = self.data_access.get_menu_items('test')
self.assertEqual(data[0]['foo'][0]['barcode'], '000000001')
def test_get_user_info(self):
data = self.data_access.get_user_info('test')
self.assertEqual(data['email'], 'test@microbio.me')
def test_get_barcode_results(self):
data = self.data_access.get_barcode_results('test')
self.assertEqual(len(data), 1)
data = self.data_access.get_barcode_results('1111')
self.assertEqual(len(data), 0)
if __name__ == "__main__":
main()
|
|
# -*- encoding: utf-8
from sqlalchemy import Column
from sqlalchemy import engine_from_config
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base as mssql
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import assertions
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.mock import Mock
def _legacy_schema_aliasing_warning():
return assertions.expect_deprecated("The legacy_schema_aliasing parameter")
class LegacySchemaAliasingTest(fixtures.TestBase, AssertsCompiledSQL):
"""Legacy behavior tried to prevent schema-qualified tables
from being rendered as dotted names, aliasing them instead.
This behavior no longer seems to be required.
"""
def setup_test(self):
metadata = MetaData()
self.t1 = table(
"t1",
column("a", Integer),
column("b", String),
column("c", String),
)
self.t2 = Table(
"t2",
metadata,
Column("a", Integer),
Column("b", Integer),
Column("c", Integer),
schema="schema",
)
def _assert_sql(self, element, legacy_sql, modern_sql=None):
dialect = self._legacy_dialect()
self.assert_compile(element, legacy_sql, dialect=dialect)
dialect = mssql.dialect()
self.assert_compile(element, modern_sql or "foob", dialect=dialect)
def _legacy_dialect(self):
with _legacy_schema_aliasing_warning():
return mssql.dialect(legacy_schema_aliasing=True)
@testing.combinations(
(
{
"sqlalchemy.url": "mssql://foodsn",
"sqlalchemy.legacy_schema_aliasing": "true",
},
True,
),
(
{
"sqlalchemy.url": "mssql://foodsn",
"sqlalchemy.legacy_schema_aliasing": "false",
},
False,
),
)
def test_legacy_schema_flag(self, cfg, expected):
with testing.expect_deprecated("The legacy_schema_aliasing parameter"):
e = engine_from_config(
cfg, module=Mock(version="MS SQL Server 11.0.92")
)
is_(e.dialect.legacy_schema_aliasing, expected)
def test_result_map(self):
s = self.t2.select()
c = s.compile(dialect=self._legacy_dialect())
assert self.t2.c.a in set(c._create_result_map()["a"][1])
def test_result_map_use_labels(self):
s = self.t2.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
c = s.compile(dialect=self._legacy_dialect())
assert self.t2.c.a in set(c._create_result_map()["schema_t2_a"][1])
def test_straight_select(self):
self._assert_sql(
self.t2.select(),
"SELECT t2_1.a, t2_1.b, t2_1.c FROM [schema].t2 AS t2_1",
"SELECT [schema].t2.a, [schema].t2.b, "
"[schema].t2.c FROM [schema].t2",
)
def test_straight_select_use_labels(self):
self._assert_sql(
self.t2.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b, "
"t2_1.c AS schema_t2_c FROM [schema].t2 AS t2_1",
"SELECT [schema].t2.a AS schema_t2_a, "
"[schema].t2.b AS schema_t2_b, "
"[schema].t2.c AS schema_t2_c FROM [schema].t2",
)
def test_join_to_schema(self):
t1, t2 = self.t1, self.t2
self._assert_sql(
t1.join(t2, t1.c.a == t2.c.a).select(),
"SELECT t1.a, t1.b, t1.c, t2_1.a AS a_1, t2_1.b AS b_1, "
"t2_1.c AS c_1 FROM t1 "
"JOIN [schema].t2 AS t2_1 ON t2_1.a = t1.a",
"SELECT t1.a, t1.b, t1.c, [schema].t2.a AS a_1, "
"[schema].t2.b AS b_1, "
"[schema].t2.c AS c_1 FROM t1 JOIN [schema].t2 "
"ON [schema].t2.a = t1.a",
)
def test_union_schema_to_non(self):
t1, t2 = self.t1, self.t2
s = (
select(t2.c.a, t2.c.b)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.union(
select(t1.c.a, t1.c.b).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
)
.alias()
.select()
)
self._assert_sql(
s,
"SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM "
"(SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b "
"FROM [schema].t2 AS t2_1 UNION SELECT t1.a AS t1_a, "
"t1.b AS t1_b FROM t1) AS anon_1",
"SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM "
"(SELECT [schema].t2.a AS schema_t2_a, [schema].t2.b AS "
"schema_t2_b FROM [schema].t2 UNION SELECT t1.a AS t1_a, "
"t1.b AS t1_b FROM t1) AS anon_1",
)
def test_column_subquery_to_alias(self):
a1 = self.t2.alias("a1")
s = select(self.t2, select(a1.c.a).scalar_subquery())
self._assert_sql(
s,
"SELECT t2_1.a, t2_1.b, t2_1.c, "
"(SELECT a1.a FROM [schema].t2 AS a1) "
"AS anon_1 FROM [schema].t2 AS t2_1",
"SELECT [schema].t2.a, [schema].t2.b, [schema].t2.c, "
"(SELECT a1.a FROM [schema].t2 AS a1) AS anon_1 FROM [schema].t2",
)
class LegacySchemaAliasingBackendTest(
testing.AssertsExecutionResults, fixtures.TestBase
):
__backend__ = True
__only_on__ = "mssql"
@testing.provide_metadata
def test_insertid_schema(self):
meta = self.metadata
with _legacy_schema_aliasing_warning():
eng = engines.testing_engine(
options=dict(legacy_schema_aliasing=False)
)
tbl = Table(
"test",
meta,
Column("id", Integer, primary_key=True),
schema=testing.config.test_schema,
)
with eng.begin() as conn:
tbl.create(conn)
conn.execute(tbl.insert(), {"id": 1})
eq_(conn.scalar(tbl.select()), 1)
@testing.provide_metadata
def test_insertid_schema_legacy(self):
meta = self.metadata
tbl = Table(
"test",
meta,
Column("id", Integer, primary_key=True),
schema=testing.config.test_schema,
)
with _legacy_schema_aliasing_warning():
eng = engines.testing_engine(
options=dict(legacy_schema_aliasing=True)
)
with eng.begin() as conn:
tbl.create(conn)
conn.execute(tbl.insert(), {"id": 1})
eq_(conn.scalar(tbl.select()), 1)
@testing.provide_metadata
def test_delete_schema_legacy(self):
meta = self.metadata
with _legacy_schema_aliasing_warning():
eng = engines.testing_engine(
options=dict(legacy_schema_aliasing=True)
)
tbl = Table(
"test",
meta,
Column("id", Integer, primary_key=True),
schema=testing.config.test_schema,
)
with eng.begin() as conn:
tbl.create(conn)
conn.execute(tbl.insert(), {"id": 1})
eq_(conn.scalar(tbl.select()), 1)
conn.execute(tbl.delete().where(tbl.c.id == 1))
eq_(conn.scalar(tbl.select()), None)
|
|
import json
import os
import tempfile
from collections.abc import Sized
from functools import partial
from io import StringIO
from pathlib import Path
from typing import Dict
import pytest
from schema_salad.utils import yaml_no_ts
import cwltool.pack
import cwltool.workflow
from cwltool.context import LoadingContext
from cwltool.load_tool import fetch_document, resolve_and_validate_document
from cwltool.main import main, make_relative, print_pack
from cwltool.resolver import tool_resolver
from cwltool.utils import adjustDirObjs, adjustFileObjs
from .util import get_data, needs_docker
@pytest.mark.parametrize(
"unpacked,expected",
[
("tests/wf/revsort.cwl", "tests/wf/expect_packed.cwl"),
(
"tests/wf/operation/operation-single.cwl",
"tests/wf/operation/expect_operation-single_packed.cwl",
),
("tests/wf/trick_revsort.cwl", "tests/wf/expect_trick_packed.cwl"),
(
"tests/wf/iwd-passthrough1.cwl",
"tests/wf/expect_iwd-passthrough1_packed.cwl",
),
(
"tests/wf/revsort_datetime.cwl",
"tests/wf/expect_revsort_datetime_packed.cwl",
),
],
)
def test_packing(unpacked: str, expected: str) -> None:
"""Compare expected version reality with various workflows and --pack."""
loadingContext, workflowobj, uri = fetch_document(get_data(unpacked))
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
packed = json.loads(print_pack(loadingContext, uri))
context_dir = os.path.abspath(os.path.dirname(get_data(unpacked)))
adjustFileObjs(packed, partial(make_relative, context_dir))
adjustDirObjs(packed, partial(make_relative, context_dir))
with open(get_data(expected)) as packed_file:
expect_packed = json.load(packed_file)
if "$schemas" in expect_packed:
assert "$schemas" in packed
packed_schemas = packed["$schemas"]
assert isinstance(packed_schemas, Sized)
assert len(packed_schemas) == len(expect_packed["$schemas"])
del packed["$schemas"]
del expect_packed["$schemas"]
assert packed == expect_packed
def test_pack_single_tool() -> None:
loadingContext, workflowobj, uri = fetch_document(
get_data("tests/wf/formattest.cwl")
)
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
packed = cwltool.pack.pack(loadingContext, uri)
assert "$schemas" in packed
def test_pack_fragment() -> None:
yaml = yaml_no_ts()
with open(get_data("tests/wf/scatter2_subwf.cwl")) as packed_file:
expect_packed = yaml.load(packed_file)
loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/scatter2.cwl"))
packed = cwltool.pack.pack(loadingContext, uri + "#scatterstep/mysub")
adjustFileObjs(
packed, partial(make_relative, os.path.abspath(get_data("tests/wf")))
)
adjustDirObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
packed_result = json.dumps(packed, sort_keys=True, indent=2)
expected = json.dumps(expect_packed, sort_keys=True, indent=2)
assert packed_result == expected
def test_pack_rewrites() -> None:
rewrites = {} # type: Dict[str, str]
loadingContext, workflowobj, uri = fetch_document(
get_data("tests/wf/default-wf5.cwl")
)
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
cwltool.pack.pack(
loadingContext,
uri,
rewrite_out=rewrites,
)
assert len(rewrites) == 6
cwl_missing_version_paths = [
"tests/wf/hello_single_tool.cwl",
"tests/wf/hello-workflow.cwl",
]
@pytest.mark.parametrize("cwl_path", cwl_missing_version_paths)
def test_pack_missing_cwlVersion(cwl_path: str) -> None:
"""Ensure the generated pack output is not missing the `cwlVersion` in case of single tool workflow and single step workflow."""
# Testing single tool workflow
loadingContext, workflowobj, uri = fetch_document(get_data(cwl_path))
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
# generate pack output dict
packed = json.loads(print_pack(loadingContext, uri))
assert packed["cwlVersion"] == "v1.0"
def test_pack_idempotence_tool(tmp_path: Path) -> None:
"""Ensure that pack produces exactly the same document for an already packed CommandLineTool."""
_pack_idempotently("tests/wf/hello_single_tool.cwl", tmp_path)
def test_pack_idempotence_workflow(tmp_path: Path) -> None:
"""Ensure that pack produces exactly the same document for an already packed workflow."""
_pack_idempotently("tests/wf/count-lines1-wf.cwl", tmp_path)
def _pack_idempotently(document: str, tmp_path: Path) -> None:
loadingContext, workflowobj, uri = fetch_document(get_data(document))
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
# generate pack output dict
packed_text = print_pack(loadingContext, uri)
packed = json.loads(packed_text)
tmp_name = tmp_path / "packed.cwl"
tmp = tmp_name.open(mode="w")
tmp.write(packed_text)
tmp.flush()
tmp.close()
loadingContext, workflowobj, uri2 = fetch_document(tmp.name)
loadingContext.do_update = False
loadingContext, uri2 = resolve_and_validate_document(
loadingContext, workflowobj, uri2
)
loader2 = loadingContext.loader
assert loader2
loader2.resolve_ref(uri2)[0]
# generate pack output dict
packed_text = print_pack(loadingContext, uri2)
double_packed = json.loads(packed_text)
assert uri != uri2
assert packed == double_packed
cwl_to_run = [
("tests/wf/count-lines1-wf.cwl", "tests/wf/wc-job.json", False),
("tests/wf/formattest.cwl", "tests/wf/formattest-job.json", True),
]
@needs_docker
@pytest.mark.parametrize("wf_path,job_path,namespaced", cwl_to_run)
def test_packed_workflow_execution(
wf_path: str, job_path: str, namespaced: bool, tmp_path: Path
) -> None:
loadingContext = LoadingContext()
loadingContext.resolver = tool_resolver
loadingContext, workflowobj, uri = fetch_document(get_data(wf_path), loadingContext)
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
packed = json.loads(print_pack(loadingContext, uri))
assert not namespaced or "$namespaces" in packed
wf_packed_handle, wf_packed_path = tempfile.mkstemp()
with open(wf_packed_path, "w") as temp_file:
json.dump(packed, temp_file)
normal_output = StringIO()
packed_output = StringIO()
normal_params = ["--outdir", str(tmp_path), get_data(wf_path), get_data(job_path)]
packed_params = [
"--outdir",
str(tmp_path),
"--debug",
wf_packed_path,
get_data(job_path),
]
assert main(normal_params, stdout=normal_output) == 0
assert main(packed_params, stdout=packed_output) == 0
assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())
os.close(wf_packed_handle)
os.remove(wf_packed_path)
|
|
#!/usr/bin/python3
"""Script to manage creation/deletion of BTRFS subvolumes/snapshots.
This script helps facilitate the creation of BTRFS subvolumes and
snapshots, with an end towards enabling better SSD utilization for
Android development. Android "repo" clients can be very large: 60-70GB
for a fully built AOSP client, and 150-170GB for a fully built
internal-tree client. Most of this space is taken up by .git data
(compiled output/object files are only about 30GB), which makes such
clients ideal candidates for BTRFS (a filesystem that supports snapshots
with "copy-on-write" semantics).
The intended usage model for the script is as follows:
1. create a BTRFS subvolume 'vol'
2. populate an Android client within 'vol' using "repo init" / "repo sync"
3. create a BTRFS snapshot of 'vol' named 'work', then configure
and build within 'work'
4. repeat step 3 for different device configurations
After the subvolume is created, each snapshot will only have an
incremental cost of ~30GB (size of derived "out" dir), meaning that
one can fit 2-4 clients on a given 256GB SSD as opposed to only a
single client.
All of the above can be done using raw 'btrfs' commands, however the
process of doing so is tricky and error-prone. The role of this script
is to provide an additional layer that helps avoid problematic usages
(ex: creating a snapshot within a snapshot, etc). Available
subcommands:
mkvol -- create a new subvolume
mksnap -- create a new snapshot
rmvol -- remove subvolume
rmsnap -- remove snapshot
Note: because commands such as "btrfs subvolume create" have to be run
via "sudo", this script can really only be run interactively (since
'sudo' will prompt for input). The script also helps retroactively
change the ownership/permissions to correspond to the script
invoker in cases where new snapshots/volumes are created.
The assumptions made by this script:
1. the current dir at script invocation is somewhere in the BTRFS
SSD on which we want to operate
   2. all snapshots/subvolumes will be direct descendants of the root
      SSD (e.g. we're creating '/ssd/newvolume' and not '/ssd/x/y/newvolume')
"""
import getopt
import os
import re
import sys
import script_utils as u
# Subcommand
flag_subcommand = None
# Subcommand arguments
flag_subcommand_args = []
# Home directory, used as the ownership/permissions reference when repairing
flag_homedir = os.getenv("HOME")
# Legal subcommands, with their required number of arguments
possible_subcommands = {
"mkvol": 1,
"mksnap": 2,
"rmvol": 1,
"rmsnap": 1
}
def repair(newvolume):
"""Repair ownership/permissions for new snapshot/subvolume."""
u.docmd("sudo chown --reference=%s %s" % (flag_homedir, newvolume))
u.docmd("sudo chgrp --reference=%s %s" % (flag_homedir, newvolume))
u.docmd("chmod 0750 %s" % newvolume)
def normalize(ssdroot, volsnapname):
"""Remove initial /ssdroot, check for bad name."""
sr = ssdroot + "/"
vsn = volsnapname
if volsnapname.startswith(sr):
srl = len(sr)
vsn = volsnapname[srl:]
if vsn.find("/") != -1:
u.error("illegal volume or snapshot name %s "
"(must refer to top level dir)" % volsnapname)
return vsn
def mkvol_subcommand(volname):
"""Create a new btrfs subvolume."""
# Determine /ssd root
ssdroot = u.determine_btrfs_ssdroot(os.getcwd())
u.verbose(1, "ssdroot=%s" % ssdroot)
  # Normalize volume name
volname = normalize(ssdroot, volname)
# Check to make sure the new volume doesn't already exist
newvolume = "%s/%s" % (ssdroot, volname)
if os.path.exists(newvolume):
u.error("path %s already exists -- can't create" % newvolume)
# Here goes
u.docmd("sudo btrfs subvolume create %s" % newvolume)
# Repair ownership/permissions
repair(newvolume)
sys.stderr.write("... new subvolume %s created\n" % newvolume)
def mksnap_subcommand(volname, snapname):
"""Snapshot an existing BTRFS subvolume or snapshot."""
# Determine /ssd root
ssdroot = u.determine_btrfs_ssdroot(os.getcwd())
u.verbose(1, "ssdroot=%s" % ssdroot)
# Normalize snap name, volume name
volname = normalize(ssdroot, volname)
snapname = normalize(ssdroot, snapname)
# Existing volume should exist
oldvolume = "%s/%s" % (ssdroot, volname)
if not os.path.exists(oldvolume):
u.error("unable to locate existing subvolume %s" % oldvolume)
# Check to make sure the new snapshot doesn't already exist
newsnap = "%s/%s" % (ssdroot, snapname)
if os.path.exists(newsnap):
u.error("path %s already exists -- can't create" % newsnap)
# Here goes
u.docmd("sudo btrfs subvolume snapshot %s %s" % (oldvolume, newsnap))
# Repair ownership/permissions
repair(newsnap)
sys.stderr.write("... new snapshot %s created\n" % newsnap)
def rmvolsnap(volsnapname, which):
"""Remove an existing btrfs snapshot or subvolume."""
# Determine /ssd root
ssdroot = u.determine_btrfs_ssdroot(os.getcwd())
u.verbose(1, "ssdroot=%s" % ssdroot)
  # Normalize volume/snapshot name
volsnapname = normalize(ssdroot, volsnapname)
# Check for existence
oldvolsnap = "%s/%s" % (ssdroot, volsnapname)
if not os.path.exists(oldvolsnap):
u.error("unable to locate existing %s %s" % (which, oldvolsnap))
# Determine whether there is a parent uuid
isvol = -1
showlines = u.docmdlines("sudo btrfs subvolume show %s" % oldvolsnap)
if not showlines:
u.error("unable to get subvolume info for %s" % oldvolsnap)
matcher = re.compile(r"^\s*Parent uuid\:\s+(\S+).*$")
for line in showlines:
m = matcher.match(line)
if m:
puid = m.group(1)
if puid == "-":
isvol = 1
else:
isvol = 0
u.verbose(2, "isvol=%d for %s" % (isvol, oldvolsnap))
if isvol == -1:
u.warning("unable to determine snapshot/subvolume status for %s" %
oldvolsnap)
elif isvol == 0:
if which == "volume":
u.warning("%s appears to be snapshot, not subvolume" % oldvolsnap)
else:
if which == "snapshot":
u.warning("%s appears to be subvolume, not snapshot" % oldvolsnap)
# Here goes
rc = u.docmdnf("sudo btrfs subvolume delete %s" % oldvolsnap)
if rc != 0:
# Couldn't delete the subvolume. Suggest running lsof
sys.stderr.write("** deletion failed -- trying to determine open file:\n")
sys.stderr.write(" lsof +D %s\n"% oldvolsnap)
u.docmdnf("lsof +D %s\n" % oldvolsnap)
exit(1)
sys.stderr.write("... %s %s deleted\n" % (which, oldvolsnap))
def usage(msgarg):
"""Print usage and exit."""
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print("""\
usage: %s [options] <subcommand> ...args...
options:
-d increase debug msg verbosity level
subcommands:
mkvol V creates new subvolume V
rmvol V remove existing subvolume V
mksnap E S create new snapshot "S" from existing volume/snapshot E
rmsnap S remove snapshot S
""" % os.path.basename(sys.argv[0]))
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_subcommand, flag_subcommand_args
try:
optlist, args = getopt.getopt(sys.argv[1:], "d")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
for opt, _ in optlist:
if opt == "-d":
u.increment_verbosity()
if not args:
usage("specify subcommand")
flag_subcommand = args[0]
if flag_subcommand not in possible_subcommands:
usage("unknown subcommand %s" % flag_subcommand)
nargs = len(args) - 1
if nargs < 1:
usage("no subcommand arguments specified")
flag_subcommand_args = args[1:]
req_args = possible_subcommands[flag_subcommand]
if nargs != req_args:
usage("subcommand %s requires %d args, %d supplied" %
(flag_subcommand, req_args, nargs))
if not flag_homedir:
usage("environment variable HOME not set")
#
#......................................................................
#
# Main portion of script
#
parse_args()
u.setdeflanglocale()
if flag_subcommand == "mksnap":
mksnap_subcommand(flag_subcommand_args[0],
flag_subcommand_args[1])
elif flag_subcommand == "mkvol":
mkvol_subcommand(flag_subcommand_args[0])
elif flag_subcommand == "rmvol":
rmvolsnap(flag_subcommand_args[0], "volume")
elif flag_subcommand == "rmsnap":
rmvolsnap(flag_subcommand_args[0], "snapshot")
else:
u.error("internal error: bad subcommand %s" % flag_subcommand)
exit(0)
|
|
from datetime import date
from itertools import accumulate
from calendar import monthrange
from collections import Counter
from kernel.IssuesModel import backlogIssuesModel
from kconfig import calendar as FWcalendar
from kconfig import workGroupBook
from kernel.Recorder import Recorder
__author__ = "Manuel Escriche <mev@tid.es>"
class BacklogReporter:
def __init__(self, backlog):
self._length = len(backlog)
self.issueType = backlog.issueType
self.perspective = backlog.perspective
self.status = backlog.status
self.sprint_status = backlog.sprint_status
self.issueType_graph_data = self._issueType_graph_data(backlog)
self.perspective_graph_data = self._perspective_graph_data(backlog)
self.sprint_status_graph_data = self._sprint_status_graph_data(backlog)
self.burndown_graph_data = self._burndown_graph_data(backlog)
self.implemented_graph_data = self._implemented_graph_data(backlog)
def __len__(self):
return self._length
def _implemented_graph_data(self, backlog):
book = FWcalendar.monthBook
createdIssues = Counter(['{:02d}-{}'.format(issue.created.month, issue.created.year) for issue in backlog])
createdData = list(accumulate([createdIssues[book[month]] for month in FWcalendar.pastMonths]))
updatedIssues = Counter(['{:02d}-{}'.format(issue.updated.month, issue.updated.year) for issue in backlog])
updatedData = list(accumulate([updatedIssues[book[month]] for month in FWcalendar.pastMonths]))
closedIssues = [issue for issue in backlog if issue.status == 'Closed']
resolvedIssues = Counter(['{:02d}-{}'.format(issue.resolutionDate.month, issue.resolutionDate.year) for issue in closedIssues])
resolvedData = list(accumulate([resolvedIssues[book[month]] for month in FWcalendar.pastMonths]))
finishedIssues = [issue for issue in closedIssues if issue.frame in ('Working On','Implemented')]
releasedIssues = Counter(['{:02d}-{}'.format(issue.releaseDate.month, issue.releaseDate.year) for issue in finishedIssues])
progressData = [releasedIssues[book[month]] for month in FWcalendar.pastMonths]
releasedData = list(accumulate(progressData))
outdata = dict()
outdata['categories'] = FWcalendar.timeline
outdata['ncategories'] = len(FWcalendar.timeline) - 1
outdata['created'] = dict()
outdata['created']['type'] = 'spline'
outdata['created']['name'] = 'Created'
outdata['created']['data'] = createdData
outdata['resolved'] = dict()
outdata['resolved']['type'] = 'spline'
outdata['resolved']['name'] = 'Resolved'
outdata['resolved']['data'] = resolvedData
outdata['updated'] = dict()
outdata['updated']['type'] = 'spline'
outdata['updated']['name'] = 'Updated'
outdata['updated']['data'] = updatedData
outdata['released'] = dict()
outdata['released']['type'] = 'spline'
outdata['released']['name'] = 'Released'
outdata['released']['data'] = releasedData
outdata['progress'] = dict()
outdata['progress']['type'] = 'column'
outdata['progress']['name'] = 'Progress'
outdata['progress']['data'] = progressData
return outdata
def _issueType_graph_data(self, backlog):
count = backlog.issueType
return [[issueType, count[issueType]] for issueType in backlog.issueType ]
def _perspective_graph_data(self, backlog):
count = backlog.perspective
return [[frame, count[frame]] for frame in backlog.Perspectives]
def _sprint_status_graph_data(self, backlog):
count = backlog.sprint_status
return [[status, count[status]] for status in count]
def _burndown_graph_data(self, backlog):
issues = [issue for issue in backlog if issue.frame == 'Working On' \
and issue.issueType in backlogIssuesModel.shortTermTypes]
closedIssues = Counter([issue.updated.day for issue in issues if issue.status == 'Closed'])
        # print(closedIssues)
NIssues = len(issues)
month_length = monthrange(date.today().year, date.today().month)[1]
data = [(day, closedIssues[day]) for day in range(1, date.today().day+1)]
# print(data)
data = zip([item[0] for item in data], accumulate([item[1] for item in data]))
data = {item[0]: NIssues-item[1] for item in data}
# print(data)
n = lambda x: NIssues/month_length if x > 0 else 0
ref_data = {day : n(day) for day in range(1, month_length+1)}
ref_data = dict(zip(ref_data.keys(), accumulate(ref_data.values())))
ref_data = {day : round(abs(NIssues-ref_data[day]), 1) for day in ref_data}
# print(ref_data)
cdata = lambda d: data[d] if d in data else 'null'
outdata = dict()
outdata['categories'] = [day for day in range(1, month_length+1)]
outdata['reference'] = dict()
outdata['reference']['type'] = 'spline'
outdata['reference']['name'] = 'Reference'
outdata['reference']['data'] = [ref_data[day] for day in range(1, month_length+1)]
outdata['reference']['marker'] = {'enabled': 'false'}
outdata['reference']['dashStyle'] = 'shortdot'
outdata['actual'] = dict()
outdata['actual']['type'] = 'spline'
outdata['actual']['name'] = 'Actual'
outdata['actual']['data'] = [cdata(day) for day in range(1, date.today().day+1)]
outdata['closed'] = dict()
outdata['closed']['type'] = 'column'
outdata['closed']['name'] = 'Closed'
outdata['closed']['data'] = [closedIssues[day] for day in range(1, date.today().day+1)]
return outdata
class WorkGroupReporter(BacklogReporter, Recorder):
def __init__(self, workgroup, backlog):
BacklogReporter.__init__(self, backlog)
Recorder.__init__(self, 'FIWARE.WorkGroupReporter.'+ workgroup.name + '.pkl')
self.groups = [workgroup.groups[group].name for group in workgroup.groups]
self.frame_status_graph_data = self._frame_status_graph_data(workgroup, backlog)
self.composition_graph_data = [[name, len([issue for issue in backlog if workgroup.groups[name].key in issue.component])]
for name in self.groups]
self.save()
def _frame_status_graph_data(self, workgroup, backlog):
frame = 'Working On'
issues = [issue for issue in backlog if issue.frame == frame and issue.component]
statuses = sorted(set([issue.status for issue in issues]))
workgroupIssuesBook = dict()
for key in workgroup.groups:
group = workgroup.groups[key]
workgroupIssuesBook[key] = Counter([issue.status for issue in issues if group.key in issue.component])
_frame_status_graph_data = []
for status in statuses:
status_dict = dict()
status_dict['name'] = status
status_dict['data'] = [workgroupIssuesBook[group][status] for group in workgroup.groups]
_frame_status_graph_data.append(status_dict)
return _frame_status_graph_data
@classmethod
def fromFile(cls, workgroup):
return super().fromFile('FIWARE.WorkGroupReporter.'+ workgroup.name + '.pkl')
class WorkGroupsReporter(BacklogReporter, Recorder):
def __init__(self, backlog):
BacklogReporter.__init__(self,backlog)
Recorder.__init__(self, 'FIWARE.WorkGroupsReporter.pkl')
self.workGroups = [workGroupBook[item].name for item in workGroupBook]
self.composition_graph_data = [[workGroupBook[item].name, len([issue for issue in backlog if issue.project == workGroupBook[item].tracker])]
for item in self.workGroups]
self.wg_sprint_status_graph_data = self._wg_sprint_status_graph_data(backlog)
self.save()
def _wg_sprint_status_graph_data(self, backlog):
frame = 'Working On'
issues = [issue for issue in backlog
if issue.frame == frame and issue.issueType in backlogIssuesModel.shortTermTypes]
statuses = sorted(set([issue.status for issue in issues]))
wgIssuesBook = dict()
for wgname in workGroupBook:
workgroup = workGroupBook[wgname]
wgIssuesBook[wgname] = Counter([issue.status for issue in issues if workgroup.tracker == issue.project ])
_frame_status_graph_data = []
for status in statuses:
status_dict = {}
status_dict['name'] = status
status_dict['data'] = [wgIssuesBook[wg][status] for wg in workGroupBook]
_frame_status_graph_data.append(status_dict)
return _frame_status_graph_data
@classmethod
def fromFile(cls, name):
return super().fromFile('FIWARE.WorkGroupsReporter.pkl')
class ChapterReporter:
"""
    Aggregate of enablers' and tools' backlogs and work lists.
"""
def __init__(self):
pass
if __name__ == "__main__":
pass
|
|
import sys
import subprocess
from urllib.parse import urlencode
import logging
import types
from functools import partial, wraps
from datetime import datetime, time, timezone
import warnings
from werkzeug.local import LocalProxy
from flask import g
from werkzeug.utils import import_string
import dateutil.parser
class classproperty(property):
"""
    A decorator that behaves like @property except that it operates
    on classes rather than instances.
    Copy of sqlalchemy.util.langhelpers.classproperty, because the original one
    is executed at class declaration time.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
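# A minimal usage sketch for classproperty (the Config class below is
# hypothetical, not used elsewhere in this module):
def _classproperty_example():
    class Config:
        prefix = 'app'
        @classproperty
        def table_name(cls):
            # computed from the class itself; no instance required
            return cls.prefix + '_config'
    return Config.table_name  # -> 'app_config'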
@classproperty
def NotImplementedProperty(self):
raise NotImplementedError()
NotImplementedClassProperty = NotImplementedProperty
class EntityLoggerAdapter(logging.LoggerAdapter):
"""
Adds info about the entity to the logged messages.
"""
def __init__(self, logger, entity):
self.logger = logger
self.entity = entity or '?'
def process(self, msg, kwargs):
return '[{}] {}'.format(self.entity, msg), kwargs
class ContextLoggerAdapter(logging.LoggerAdapter):
def bind(self, **extra):
return self.__class__(self.logger, {**self.extra.copy(), **extra})
def process(self, msg, kwargs):
return (
('%s %s'
% (' '.join('%s=%s' % (k, v) for k, v in self.extra.items()), msg)),
kwargs)
def resolve_obj_key(obj, key):
if key.isdigit():
try:
return obj[int(key)]
except Exception:
try:
return obj[key]
except Exception as exc:
raise ValueError('Could not resolve "{}" on {} object: {!r}'
.format(key, obj, exc))
else:
try:
return obj[key]
except Exception:
try:
return getattr(obj, key)
except Exception as exc:
raise ValueError('Could not resolve "{}" on {} object: {!r}'
.format(key, obj, exc))
def resolve_obj_path(obj, path, suppress_exc=False):
try:
dot_pos = path.find('.')
if dot_pos == -1:
return resolve_obj_key(obj, path)
else:
key, path = path[:dot_pos], path[(dot_pos + 1):]
return resolve_obj_path(resolve_obj_key(obj, key), path)
except Exception as exc:
if suppress_exc:
return exc
raise
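# A minimal usage sketch for resolve_obj_path (the `data` dict is hypothetical):
def _resolve_obj_path_example():
    data = {'user': {'emails': ['a@example.com', 'b@example.com']}}
    # digit keys are tried as sequence indexes first
    found = resolve_obj_path(data, 'user.emails.1')  # 'b@example.com'
    # with suppress_exc=True the raised ValueError is returned instead of raised
    missing = resolve_obj_path(data, 'user.missing', suppress_exc=True)
    return found, missing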
class AttrDict(dict):
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __dir__(self):
# Autocompletion for ipython
return super().__dir__() + list(self.keys())
def __getstate__(self):
# We need it for pickle because it depends on __getattr__
return dict(self)
def __setstate__(self, dict_):
self.update(dict_)
def maybe_attr_dict(data):
if isinstance(data, dict):
return AttrDict({k: maybe_attr_dict(v) for k, v in data.items()})
return data
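# A minimal usage sketch for AttrDict / maybe_attr_dict (the payload below is
# hypothetical): nested dicts become attribute-accessible recursively.
def _attr_dict_example():
    payload = maybe_attr_dict({'user': {'name': 'alice', 'roles': ['admin']}})
    return payload.user.name, payload.user.roles[0]  # ('alice', 'admin')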
def maybe_encode(data, encoding='utf-8'):
    return data if isinstance(data, bytes) else str(data).encode(encoding)
def maybe_decode(data, encoding='utf-8'):
return data if isinstance(data, str) else data.decode(encoding)
def maybe_import_string(value):
return import_string(value) if isinstance(value, str) else value
def parse_timestamp(timestamp, tz=timezone.utc):
return datetime.utcfromtimestamp(float(timestamp)).replace(tzinfo=tz)
def parse_time(data):
hours, minutes, seconds, *_ = data.split(':') + [0, 0]
return time(int(hours), int(minutes), int(seconds))
def parse_datetime(data, tz=timezone.utc):
return maybe_tz(dateutil.parser.parse(data), tz)
def maybe_tz(dt, tz=timezone.utc):
if not dt.tzinfo:
return dt.replace(tzinfo=tz)
return dt
def utcnow():
return datetime.now(tz=timezone.utc)
def is_instance_or_proxied(obj, cls):
if isinstance(obj, LocalProxy):
obj = obj._get_current_object()
return isinstance(obj, cls)
def local_proxy_on_g(attr_name=None):
def decorator(func):
attr = attr_name or func.__name__
def wrapper():
if g:
if not hasattr(g, attr):
setattr(g, attr, func())
return getattr(g, attr)
return LocalProxy(wrapper)
return decorator
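# A minimal usage sketch for local_proxy_on_g; assumes a Flask application
# context is active so that `g` is usable (the cache below is hypothetical):
def _local_proxy_on_g_example():
    from flask import Flask
    @local_proxy_on_g()
    def request_cache():
        return {}  # built lazily, once per application context, stored on g
    app = Flask(__name__)
    with app.app_context():
        request_cache['greeting'] = 'hello'  # access goes through the LocalProxy
        return request_cache['greeting']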
def decorator_with_default_args(target):
"""
    This decorator should be used on another decorator that implements default kwargs,
    and therefore may be used as @decorator, @decorator() or @decorator(key=override_value).
Definition example:
@decorator_with_default_args
def my_decorator(func, key=default_value):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
"""
    recipe = """
def my_decorator(func=None, **kwargs):
if func:
return my_decorator()(func)
def decorator(func):
return func
return decorator
"""
    warnings.warn('decorator_with_default_args is deprecated. '
                  'Use this recipe instead: {}'.format(recipe),
DeprecationWarning)
def decorator(func=None, **kwargs):
if func and isinstance(func, types.FunctionType):
return target(func)
else:
assert not func, 'You should use this decorator only with kwargs'
return partial(target, **kwargs)
return decorator
def get_git_repository_info(path='./'):
if not hasattr(get_git_repository_info, '_info'):
get_git_repository_info._info = {}
info = get_git_repository_info._info
if path not in info:
try:
pipe = subprocess.Popen(['git', 'log', '-1', '--pretty=format:"%h|%ce|%cd"'],
stdout=subprocess.PIPE, cwd=path)
out, err = pipe.communicate()
            info[path] = dict(zip(('rev', 'email', 'time'),
                                  out.decode().strip('"').split('|')))
except Exception:
# do not retry on first fail
info[path] = {}
# raise
return info[path]
def monkey_patch_meth(obj, attr, safe=True):
orig_func = getattr(obj, attr)
def decorator(func):
@wraps(orig_func)
def wrapper(*args, **kwargs):
return func(orig_func, *args, **kwargs)
flag_attr = '_monkey_patched_{}'.format(attr)
if not safe or not hasattr(obj, flag_attr):
setattr(obj, attr, wrapper)
if safe:
setattr(obj, flag_attr, True)
return decorator
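# A minimal usage sketch for monkey_patch_meth (the Greeter class below is
# hypothetical). Note that the decorator patches `obj` in place and returns
# None, so the decorated name itself is not reused afterwards.
def _monkey_patch_meth_example():
    class Greeter:
        def greet(self, name):
            return 'hello ' + name
    @monkey_patch_meth(Greeter, 'greet')
    def greet(orig_func, self, name):
        return orig_func(self, name).upper()
    return Greeter().greet('world')  # -> 'HELLO WORLD'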
def url_with_qs(url, **qs):
if qs and not url.endswith('?'):
url += '?'
return url + urlencode(qs)
def get_argv_opt(shortname=None, longname=None, is_bool=False):
"""
Simple and naive helper to get option from command line.
Returns None on any error.
"""
assert shortname or longname
if shortname:
assert shortname.startswith('-')
try:
x = sys.argv.index(shortname)
return True if is_bool else sys.argv[x + 1]
except (ValueError, IndexError):
pass
if longname:
assert longname.startswith('--')
for arg in sys.argv:
if arg.startswith(longname):
if is_bool and len(longname) == len(arg):
return True
                if len(arg) > len(longname) and arg[len(longname)] == '=':
return arg[len(longname) + 1:]
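# A minimal usage sketch for get_argv_opt; the command line below is
# hypothetical, e.g. `python app.py -v --config=prod.ini`:
def _get_argv_opt_example():
    verbose = get_argv_opt('-v', is_bool=True)   # True if -v was passed
    config = get_argv_opt(longname='--config')   # 'prod.ini', or None if absent
    return verbose, config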
|
|
def update_classes_table(filename):
import sqlite3
from datetime import datetime
import globalvars
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
# Make some fresh tables using executescript()
cur.executescript('''
DROP TABLE IF EXISTS Classes;
DROP TABLE IF EXISTS Professors;
DROP TABLE IF EXISTS Sections;
DROP TABLE IF EXISTS Times;
DROP TABLE IF EXISTS Sections_Times;
DROP TABLE IF EXISTS Con_Time_Time;
DROP TABLE IF EXISTS Pref_Section_Section;
DROP TABLE IF EXISTS Pref2_Section_Section;
DROP TABLE IF EXISTS Con_Section_Section;
DROP TABLE IF EXISTS Year;
DROP TABLE IF EXISTS Division;
DROP TABLE IF EXISTS Skill;
DROP TABLE IF EXISTS Sections_Year;
DROP TABLE IF EXISTS Sections_Division;
DROP TABLE IF EXISTS Sections_Skill;
CREATE TABLE Classes (
ClassID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
Name TEXT NOT NULL,
YearID INTEGER DEFAULT 0,
DivisionID INTEGER DEFAULT 0,
SkillID INTEGER DEFAULT 0,
ShortName TEXT UNIQUE,
Worth REAL
);
CREATE TABLE Professors (
ProfessorID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
Name TEXT NOT NULL UNIQUE
);
CREATE TABLE Sections (
SectionID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
Name TEXT NOT NULL,
ProfessorID INTEGER NOT NULL,
ClassID INTEGER NOT NULL,
NumberOpen INTEGER,
Seats INTEGER,
Room TEXT,
StudentID INTEGER DEFAULT 0,
Scheduled INTEGER DEFAULT 0,
Worth REAL
);
CREATE TABLE Times (
Time TEXT,
Start TEXT,
End TEXT,
Day TEXT,
TimeID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE
);
CREATE TABLE Sections_Times (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
SectionID INTEGER NOT NULL,
TimeID INTEGER NOT NULL
);
CREATE TABLE Con_Time_Time (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
TimeID1 INTEGER NOT NULL,
TimeID2 INTEGER NOT NULL
);
CREATE TABLE Pref_Section_Section (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
SectionID1 INTEGER NOT NULL,
SectionID2 INTEGER NOT NULL
);
CREATE TABLE Pref2_Section_Section (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
SectionID1 INTEGER NOT NULL,
SectionID2 INTEGER NOT NULL
);
CREATE TABLE Con_Section_Section (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
SectionID1 INTEGER NOT NULL,
SectionID2 INTEGER NOT NULL
);
CREATE TABLE Year (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
Name TEXT
);
CREATE TABLE Division (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
Name TEXT
);
CREATE TABLE Skill (
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
Name TEXT
);
CREATE TABLE Sections_Year(
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
SectionID INTEGER NOT NULL,
YearID INTEGER NOT NULL
);
CREATE TABLE Sections_Division(
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
SectionID INTEGER NOT NULL,
DivisionID INTEGER NOT NULL
);
CREATE TABLE Sections_Skill(
PrimaryKey INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
SectionID INTEGER NOT NULL,
SkillID INTEGER NOT NULL
)
''')
# create YDS
years = [1,2,3,4,5,"any"]
skills = [1,2,3,4,5,"any"]
divisions = ["A", "B", "I", "O", "P", "any"]
for i in range(6):
cur.execute('INSERT OR IGNORE INTO Year (Name) VALUES(?)', (years[i],))
cur.execute('INSERT OR IGNORE INTO Division (Name) VALUES(?)', (divisions[i],))
cur.execute('INSERT OR IGNORE INTO Skill (Name) VALUES(?)', (skills[i],))
# update from each row in data/listings.csv
# as created by update_classes.py
#filename = 'data/listings.csv'
for entry in open(filename):
entry = entry.rstrip()
entry = entry.split(',')
# create and get ProfessorID
if len(entry) == 12:
professor = entry[10] + ', ' + entry[11]
professor = professor[1:-1]
cur.execute('''INSERT OR IGNORE INTO Professors (Name) VALUES(?)''', (professor,))
cur.execute('SELECT ProfessorID FROM Professors WHERE Name = ?', (professor, ))
ProfessorID = cur.fetchone()[0]
conn.commit()
if len(entry) == 11:
if entry[10] != '\xc2\xa0':
professor = entry[10].strip()
cur.execute('''INSERT OR IGNORE INTO Professors (Name) VALUES ( ? )''', (str(professor), ) )
cur.execute('SELECT ProfessorID FROM Professors WHERE Name = ?', (professor, ))
ProfessorID = cur.fetchone()[0]
conn.commit()
if entry[10] == '\xc2\xa0':
professor = 'Staff'
cur.execute('''INSERT OR IGNORE INTO Professors (Name)
VALUES ( ? )''', (str(professor), ) )
cur.execute('SELECT ProfessorID FROM Professors WHERE Name = ?', (professor, ))
ProfessorID = cur.fetchone()[0]
conn.commit()
# create and get ClassID
if len(entry) > 11:
classname = entry[1]
classother = entry[0]
shortname = classother[0:7]
worth = entry[2]
cur.execute('''INSERT OR IGNORE INTO Classes (Name, ShortName)
VALUES ( ?, ? )''', ( classname, shortname ) )
cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ? ', (shortname, ))
ClassID = cur.fetchone()[0]
# create and get SectionID
sectionnum = classother[7:10]
try:
cur.execute('SELECT SectionID FROM Sections WHERE Name = ? and ClassID = ?', (sectionnum, ClassID))
SectionID = cur.fetchone()[0]
            except Exception:
cur.execute('''INSERT OR IGNORE INTO Sections
(Name, ProfessorID, ClassID, Worth) VALUES (?, ?, ?, ?)''', (sectionnum, ProfessorID, ClassID, worth) )
cur.execute('SELECT SectionID FROM Sections WHERE Name = ? and ClassID = ?', (sectionnum, ClassID))
SectionID = cur.fetchone()[0]
# get YDS
if entry[3] == 'any':
#for y in years[:-1]:
# cur.execute('SELECT PrimaryKey FROM Year WHERE Name = ?',(y,))
# YearID = cur.fetchone()[0]
cur.execute('UPDATE Classes SET YearID = ? WHERE ClassID = ?',(6, ClassID))
entry[3] = '12345'
if entry[3] != 'any':
for y in entry[3]:
cur.execute('SELECT PrimaryKey FROM Year WHERE Name = ?',(y,))
YearID = cur.fetchone()[0]
cur.execute('UPDATE Classes SET YearID = ? WHERE ClassID = ?',(YearID, ClassID))
if entry[4] == 'any':
#for y in divisions[:-1]:
# print y
# cur.execute('SELECT PrimaryKey FROM Division WHERE Name = ?',(y,))
# DivisionID = cur.fetchone()[0]
cur.execute('UPDATE Classes SET DivisionID = ? WHERE ClassID = ?',(6, ClassID))
entry[4] = 'ABIOP'
if entry[4] != 'any':
for y in entry[4]:
cur.execute('SELECT PrimaryKey FROM Division WHERE Name = ?',(y,))
DivisionID = cur.fetchone()[0]
cur.execute('UPDATE Classes SET DivisionID = ? WHERE ClassID = ?',(DivisionID, ClassID))
if entry[5] == 'any':
#for y in skills[:-1]:
# cur.execute('SELECT PrimaryKey FROM Skill WHERE Name = ?',(y,))
# SkillID = cur.fetchone()[0]
cur.execute('UPDATE Classes SET SkillID = ? WHERE ClassID = ?',(6, ClassID))
entry[5] = '12345'
if entry[5] != 'any':
for y in entry[5]:
cur.execute('SELECT PrimaryKey FROM Skill WHERE Name = ?',(y,))
SkillID = cur.fetchone()[0]
cur.execute('UPDATE Classes SET SkillID = ? WHERE ClassID = ?',(SkillID, ClassID))
year = entry[3]
if '1' in year:
cur.execute('INSERT OR IGNORE INTO Sections_Year (SectionID, YearID) VALUES (?, ?)', (SectionID, 1))
elif '2' in year:
cur.execute('INSERT OR IGNORE INTO Sections_Year (SectionID, YearID) VALUES (?, ?)', (SectionID, 2))
elif '3' in year:
cur.execute('INSERT OR IGNORE INTO Sections_Year (SectionID, YearID) VALUES (?, ?)', (SectionID, 3))
elif '4' in year:
cur.execute('INSERT OR IGNORE INTO Sections_Year (SectionID, YearID) VALUES (?, ?)', (SectionID, 4))
elif '5' in year:
cur.execute('INSERT OR IGNORE INTO Sections_Year (SectionID, YearID) VALUES (?, ?)', (SectionID, 5))
else:
cur.execute('INSERT OR IGNORE INTO Sections_Year (SectionID, YearID) VALUES (?, ?)', (SectionID, 6))
div = entry[4]
if 'A' in div:
cur.execute('INSERT OR IGNORE INTO Sections_Division (SectionID, DivisionID) VALUES (?, ?)', (SectionID, 1))
elif 'B' in div:
cur.execute('INSERT OR IGNORE INTO Sections_Division (SectionID, DivisionID) VALUES (?, ?)', (SectionID, 2))
elif 'I' in div:
cur.execute('INSERT OR IGNORE INTO Sections_Division (SectionID, DivisionID) VALUES (?, ?)', (SectionID, 3))
elif 'O' in div:
cur.execute('INSERT OR IGNORE INTO Sections_Division (SectionID, DivisionID) VALUES (?, ?)', (SectionID, 4))
elif 'P' in div:
cur.execute('INSERT OR IGNORE INTO Sections_Division (SectionID, DivisionID) VALUES (?, ?)', (SectionID, 5))
else:
cur.execute('INSERT OR IGNORE INTO Sections_Division (SectionID, DivisionID) VALUES (?, ?)', (SectionID, 6))
skill = entry[5]
if '1' in skill:
cur.execute('INSERT OR IGNORE INTO Sections_Skill (SectionID, SkillID) VALUES (?, ?)', (SectionID, 1))
if '2' in skill:
cur.execute('INSERT OR IGNORE INTO Sections_Skill (SectionID, SkillID) VALUES (?, ?)', (SectionID, 2))
if '3' in skill:
cur.execute('INSERT OR IGNORE INTO Sections_Skill (SectionID, SkillID) VALUES (?, ?)', (SectionID, 3))
if '4' in skill:
cur.execute('INSERT OR IGNORE INTO Sections_Skill (SectionID, SkillID) VALUES (?, ?)', (SectionID, 4))
if '5' in skill:
cur.execute('INSERT OR IGNORE INTO Sections_Skill (SectionID, SkillID) VALUES (?, ?)', (SectionID, 5))
if '6' in skill:
cur.execute('INSERT OR IGNORE INTO Sections_Skill (SectionID, SkillID) VALUES (?, ?)', (SectionID, 6))
# create and get TimeID
# clean up time entry
time = entry[8]
day = entry[7]
try:
end = time.split(' - ')[1].strip()
start = time.split(' - ')[0].strip()
            except Exception:
start = 'TBA'
end = 'TBA'
time = 'TBA'
cur.execute('SELECT TimeID FROM Times WHERE Time = ? and Day = ?', (time, day))
try:
data = cur.fetchone()[0]
            except Exception:
data = None
if data is None:
cur.execute('''INSERT OR IGNORE INTO Times (Start, End, Time, Day) VALUES (?, ?, ?, ?)''', (start, end, time, day))
cur.execute('''SELECT TimeID FROM Times WHERE Time = ? and Day = ?''', (time, day))
TimeID = cur.fetchone()[0]
else:
TimeID = data
# Correspond section and time
cur.execute('''INSERT OR IGNORE INTO Sections_Times (SectionID, TimeID) VALUES (?, ?)''', (SectionID, TimeID))
# create and get RoomID
room = entry[9]
# add seats and students scheduled to section
seats = entry[6]
seats = seats.split(' ')
nostu = seats[0]
seats = seats[2]
cur.execute('''UPDATE Sections
SET Room = ?, NumberOpen = ?, Seats = ?
WHERE SectionID = ?
''', (room, nostu, seats, SectionID))
# commit to database
conn.commit()
# Determine Time_Time Conflicts
days = ["M", "T", "W", "R", "F"]
for day in days:
times = cur.execute('''SELECT Start, End, TimeID FROM Times WHERE Day = ?''', (day, ) )
#endtimes = cur.execute('''SELECT End FROM Times WHERE Day = ?''', (day, ) )
id = []
start = []
end = []
for time in times:
start.append(datetime.strptime(str(time[0]), '%I:%M%p'))
end.append( datetime.strptime(str(time[1]), '%I:%M%p'))
id.append(int(str(time[2])))
for i in range(len(id)):
otherstarts = start[:i] + start[i+1:]
otherends = end[:i] + end[i+1:]
otherids = id[:i] + id[i+1:]
nowstart = start[i]
nowend = end[i]
cur.execute('''INSERT OR IGNORE INTO Con_Time_Time (TimeID1, TimeID2) VALUES (?,?)''',(id[i],id[i]))
for j in range(len(otherstarts)):
if nowstart > otherstarts[j] and nowstart < otherends[j]:
cur.execute('''INSERT OR IGNORE INTO Con_Time_Time (TimeID1,
TimeID2) VALUES (?, ?)''', (id[i], otherids[j]) )
cur.execute('''INSERT OR IGNORE INTO Con_Time_Time (TimeID1,
TimeID2) VALUES (?, ?)''', (otherids[j], id[i]) )
elif nowend > otherstarts[j] and nowend < otherends[j]:
cur.execute('''INSERT OR IGNORE INTO Con_Time_Time (TimeID1,
TimeID2) VALUES (?, ?)''', (id[i], otherids[j]) )
cur.execute('''INSERT OR IGNORE INTO Con_Time_Time (TimeID1,
TimeID2) VALUES (?, ?)''', (otherids[j], id[i]) )
conn.commit()
# Determine Section_Section Preferences
# preference for sections from same class
classes = cur.execute('''SELECT DISTINCT ClassID FROM Sections''')
classes = cur.fetchall()
for course in classes:
course = course[0]
sections = cur.execute('''SELECT SectionID FROM Sections WHERE ClassID = ?''', (course, ) )
sections = cur.fetchall()
for i in range(len(sections)):
othersections = sections[:i] + sections[i+1:]
for j in range(len(othersections)):
cur.execute('''INSERT OR IGNORE INTO Pref_Section_Section (SectionID1, SectionID2) VALUES (?, ?)''', (sections[i][0], othersections[j][0]))
conn.commit()
# preference for sections from same professor
classes = cur.execute('''SELECT DISTINCT ProfessorID FROM Sections''')
classes = cur.fetchall()
for course in classes:
course = course[0]
sections = cur.execute('''SELECT SectionID FROM Sections WHERE ProfessorID = ?''', (course, ) )
sections = cur.fetchall()
for i in range(len(sections)):
othersections = sections[:i] + sections[i+1:]
for j in range(len(othersections)):
cur.execute('''INSERT OR IGNORE INTO Pref2_Section_Section (SectionID1, SectionID2) VALUES (?, ?)''', (sections[i][0], othersections[j][0]))
conn.commit()
# Determine Section_Section Conflicts
sections = cur.execute('SELECT DISTINCT SectionID FROM Sections')
sections = cur.fetchall()
for section in sections:
section = section[0]
conflict = cur.execute('''
SELECT D.SectionID
FROM Sections_Times B Inner Join Con_Time_Time C
ON B.TimeID = C.TimeID1
INNER JOIN Sections_Times D
ON C.TimeID2 = D.TimeID
WHERE B.SectionID = ?''', (section, ) )
conflicts = cur.fetchall()
if len(conflicts) > 0:
for conflict in conflicts:
cur.execute('''INSERT OR IGNORE INTO Con_Section_Section
(SectionID1, SectionID2) VALUES (?, ?)''', (section, conflict[0]))
conn.commit()
|
|
import asyncio
import collections
import warnings
from typing import List # noqa
from typing import Awaitable, Callable, Generic, Optional, Tuple, TypeVar
from .base_protocol import BaseProtocol
from .helpers import BaseTimerContext, set_exception, set_result
from .log import internal_logger
try: # pragma: no cover
from typing import Deque # noqa
except ImportError:
from typing_extensions import Deque # noqa
__all__ = (
'EMPTY_PAYLOAD', 'EofStream', 'StreamReader', 'DataQueue',
'FlowControlDataQueue')
DEFAULT_LIMIT = 2 ** 16
_T = TypeVar('_T')
class EofStream(Exception):
"""eof stream indication."""
class AsyncStreamIterator(Generic[_T]):
def __init__(self, read_func: Callable[[], Awaitable[_T]]) -> None:
self.read_func = read_func
def __aiter__(self) -> 'AsyncStreamIterator[_T]':
return self
async def __anext__(self) -> _T:
try:
rv = await self.read_func()
except EofStream:
raise StopAsyncIteration # NOQA
if rv == b'':
raise StopAsyncIteration # NOQA
return rv
class ChunkTupleAsyncStreamIterator:
def __init__(self, stream: 'StreamReader') -> None:
self._stream = stream
def __aiter__(self) -> 'ChunkTupleAsyncStreamIterator':
return self
async def __anext__(self) -> Tuple[bytes, bool]:
rv = await self._stream.readchunk()
if rv == (b'', False):
raise StopAsyncIteration # NOQA
return rv
class AsyncStreamReaderMixin:
def __aiter__(self) -> AsyncStreamIterator[bytes]:
return AsyncStreamIterator(self.readline) # type: ignore
def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]:
"""Returns an asynchronous iterator that yields chunks of size n.
        Available for Python 3.5+ only.
"""
return AsyncStreamIterator(lambda: self.read(n)) # type: ignore
def iter_any(self) -> AsyncStreamIterator[bytes]:
"""Returns an asynchronous iterator that yields all the available
        data as soon as it is received.
        Available for Python 3.5+ only.
"""
return AsyncStreamIterator(self.readany) # type: ignore
def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:
"""Returns an asynchronous iterator that yields chunks of data
as they are received by the server. The yielded objects are tuples
of (bytes, bool) as returned by the StreamReader.readchunk method.
        Available for Python 3.5+ only.
"""
return ChunkTupleAsyncStreamIterator(self) # type: ignore
class StreamReader(AsyncStreamReaderMixin):
"""An enhancement of asyncio.StreamReader.
Supports asynchronous iteration by line, chunk or as available::
async for line in reader:
...
async for chunk in reader.iter_chunked(1024):
...
async for slice in reader.iter_any():
...
"""
total_bytes = 0
def __init__(self, protocol: BaseProtocol,
*, limit: int=DEFAULT_LIMIT,
timer: Optional[BaseTimerContext]=None,
loop: Optional[asyncio.AbstractEventLoop]=None) -> None:
self._protocol = protocol
self._low_water = limit
self._high_water = limit * 2
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
self._size = 0
self._cursor = 0
self._http_chunk_splits = None # type: Optional[List[int]]
self._buffer = collections.deque() # type: Deque[bytes]
self._buffer_offset = 0
self._eof = False
self._waiter = None # type: Optional[asyncio.Future[bool]]
self._eof_waiter = None # type: Optional[asyncio.Future[bool]]
self._exception = None # type: Optional[BaseException]
self._timer = timer
self._eof_callbacks = [] # type: List[Callable[[], None]]
def __repr__(self) -> str:
info = [self.__class__.__name__]
if self._size:
info.append('%d bytes' % self._size)
if self._eof:
info.append('eof')
if self._low_water != DEFAULT_LIMIT:
info.append('low=%d high=%d' % (self._low_water, self._high_water))
if self._waiter:
info.append('w=%r' % self._waiter)
if self._exception:
info.append('e=%r' % self._exception)
return '<%s>' % ' '.join(info)
def exception(self) -> Optional[BaseException]:
return self._exception
def set_exception(self, exc: BaseException) -> None:
self._exception = exc
self._eof_callbacks.clear()
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_exception(waiter, exc)
waiter = self._eof_waiter
if waiter is not None:
set_exception(waiter, exc)
self._eof_waiter = None
def on_eof(self, callback: Callable[[], None]) -> None:
if self._eof:
try:
callback()
except Exception:
internal_logger.exception('Exception in eof callback')
else:
self._eof_callbacks.append(callback)
def feed_eof(self) -> None:
self._eof = True
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, True)
waiter = self._eof_waiter
if waiter is not None:
self._eof_waiter = None
set_result(waiter, True)
for cb in self._eof_callbacks:
try:
cb()
except Exception:
internal_logger.exception('Exception in eof callback')
self._eof_callbacks.clear()
def is_eof(self) -> bool:
"""Return True if 'feed_eof' was called."""
return self._eof
def at_eof(self) -> bool:
"""Return True if the buffer is empty and 'feed_eof' was called."""
return self._eof and not self._buffer
async def wait_eof(self) -> None:
if self._eof:
return
assert self._eof_waiter is None
self._eof_waiter = self._loop.create_future()
try:
await self._eof_waiter
finally:
self._eof_waiter = None
def unread_data(self, data: bytes) -> None:
""" rollback reading some data from stream, inserting it to buffer head.
"""
warnings.warn("unread_data() is deprecated "
"and will be removed in future releases (#3260)",
DeprecationWarning,
stacklevel=2)
if not data:
return
if self._buffer_offset:
self._buffer[0] = self._buffer[0][self._buffer_offset:]
self._buffer_offset = 0
self._size += len(data)
self._cursor -= len(data)
self._buffer.appendleft(data)
self._eof_counter = 0
# TODO: size is ignored, remove the param later
def feed_data(self, data: bytes, size: int=0) -> None:
assert not self._eof, 'feed_data after feed_eof'
if not data:
return
self._size += len(data)
self._buffer.append(data)
self.total_bytes += len(data)
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, False)
if (self._size > self._high_water and
not self._protocol._reading_paused):
self._protocol.pause_reading()
def begin_http_chunk_receiving(self) -> None:
if self._http_chunk_splits is None:
self._http_chunk_splits = []
def end_http_chunk_receiving(self) -> None:
if self._http_chunk_splits is None:
raise RuntimeError("Called end_chunk_receiving without calling "
"begin_chunk_receiving first")
if not self._http_chunk_splits or \
self._http_chunk_splits[-1] != self.total_bytes:
self._http_chunk_splits.append(self.total_bytes)
# wake up readchunk when end of http chunk received
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, False)
async def _wait(self, func_name: str) -> None:
# StreamReader uses a future to link the protocol feed_data() method
# to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
# which coroutine would get the next data.
if self._waiter is not None:
raise RuntimeError('%s() called while another coroutine is '
'already waiting for incoming data' % func_name)
waiter = self._waiter = self._loop.create_future()
try:
if self._timer:
with self._timer:
await waiter
else:
await waiter
finally:
self._waiter = None
async def readline(self) -> bytes:
if self._exception is not None:
raise self._exception
line = []
line_size = 0
not_enough = True
while not_enough:
while self._buffer and not_enough:
offset = self._buffer_offset
ichar = self._buffer[0].find(b'\n', offset) + 1
# Read from current offset to found b'\n' or to the end.
data = self._read_nowait_chunk(ichar - offset if ichar else -1)
line.append(data)
line_size += len(data)
if ichar:
not_enough = False
if line_size > self._high_water:
raise ValueError('Line is too long')
if self._eof:
break
if not_enough:
await self._wait('readline')
return b''.join(line)
async def read(self, n: int=-1) -> bytes:
if self._exception is not None:
raise self._exception
        # Migration note: with DataQueue you have to catch the EofStream
        # exception, so the common pattern was to run payload.read() inside an
        # infinite loop. That pattern can cause a real infinite loop with
        # StreamReader, so let's keep this guard for one major release.
if __debug__:
if self._eof and not self._buffer:
self._eof_counter = getattr(self, '_eof_counter', 0) + 1
if self._eof_counter > 5:
internal_logger.warning(
'Multiple access to StreamReader in eof state, '
'might be infinite loop.', stack_info=True)
if not n:
return b''
if n < 0:
# This used to just loop creating a new waiter hoping to
# collect everything in self._buffer, but that would
# deadlock if the subprocess sends more than self.limit
# bytes. So just call self.readany() until EOF.
blocks = []
while True:
block = await self.readany()
if not block:
break
blocks.append(block)
return b''.join(blocks)
if not self._buffer and not self._eof:
await self._wait('read')
return self._read_nowait(n)
async def readany(self) -> bytes:
if self._exception is not None:
raise self._exception
if not self._buffer and not self._eof:
await self._wait('readany')
return self._read_nowait(-1)
async def readchunk(self) -> Tuple[bytes, bool]:
"""Returns a tuple of (data, end_of_http_chunk). When chunked transfer
encoding is used, end_of_http_chunk is a boolean indicating if the end
        of the data corresponds to the end of an HTTP chunk, otherwise it is
always False.
"""
if self._exception is not None:
raise self._exception
if not self._buffer and not self._eof:
if (self._http_chunk_splits and
self._cursor == self._http_chunk_splits[0]):
# end of http chunk without available data
self._http_chunk_splits = self._http_chunk_splits[1:]
return (b"", True)
await self._wait('readchunk')
if not self._buffer and not self._http_chunk_splits:
# end of file
return (b"", False)
elif self._http_chunk_splits is not None:
while self._http_chunk_splits:
pos = self._http_chunk_splits[0]
self._http_chunk_splits = self._http_chunk_splits[1:]
if pos == self._cursor:
return (b"", True)
if pos > self._cursor:
return (self._read_nowait(pos-self._cursor), True)
return (self._read_nowait(-1), False)
else:
return (self._read_nowait_chunk(-1), False)
async def readexactly(self, n: int) -> bytes:
if self._exception is not None:
raise self._exception
blocks = [] # type: List[bytes]
while n > 0:
block = await self.read(n)
if not block:
partial = b''.join(blocks)
raise asyncio.streams.IncompleteReadError(
partial, len(partial) + n)
blocks.append(block)
n -= len(block)
return b''.join(blocks)
def read_nowait(self, n: int=-1) -> bytes:
# default was changed to be consistent with .read(-1)
#
        # I believe most users don't know about the method,
        # so they are not affected.
if self._exception is not None:
raise self._exception
if self._waiter and not self._waiter.done():
raise RuntimeError(
'Called while some coroutine is waiting for incoming data.')
return self._read_nowait(n)
def _read_nowait_chunk(self, n: int) -> bytes:
first_buffer = self._buffer[0]
offset = self._buffer_offset
if n != -1 and len(first_buffer) - offset > n:
data = first_buffer[offset:offset + n]
self._buffer_offset += n
elif offset:
self._buffer.popleft()
data = first_buffer[offset:]
self._buffer_offset = 0
else:
data = self._buffer.popleft()
self._size -= len(data)
self._cursor += len(data)
if self._size < self._low_water and self._protocol._reading_paused:
self._protocol.resume_reading()
return data
def _read_nowait(self, n: int) -> bytes:
chunks = []
while self._buffer:
chunk = self._read_nowait_chunk(n)
chunks.append(chunk)
if n != -1:
n -= len(chunk)
if n == 0:
break
return b''.join(chunks) if chunks else b''
class EmptyStreamReader(AsyncStreamReaderMixin):
def exception(self) -> Optional[BaseException]:
return None
def set_exception(self, exc: BaseException) -> None:
pass
def on_eof(self, callback: Callable[[], None]) -> None:
try:
callback()
except Exception:
internal_logger.exception('Exception in eof callback')
def feed_eof(self) -> None:
pass
def is_eof(self) -> bool:
return True
def at_eof(self) -> bool:
return True
async def wait_eof(self) -> None:
return
def feed_data(self, data: bytes, n: int=0) -> None:
pass
async def readline(self) -> bytes:
return b''
async def read(self, n: int=-1) -> bytes:
return b''
async def readany(self) -> bytes:
return b''
async def readchunk(self) -> Tuple[bytes, bool]:
return (b'', True)
async def readexactly(self, n: int) -> bytes:
raise asyncio.streams.IncompleteReadError(b'', n)
def read_nowait(self) -> bytes:
return b''
EMPTY_PAYLOAD = EmptyStreamReader()
class DataQueue(Generic[_T]):
"""DataQueue is a general-purpose blocking queue with one reader."""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._eof = False
self._waiter = None # type: Optional[asyncio.Future[bool]]
self._exception = None # type: Optional[BaseException]
self._size = 0
self._buffer = collections.deque() # type: Deque[Tuple[_T, int]]
def __len__(self) -> int:
return len(self._buffer)
def is_eof(self) -> bool:
return self._eof
def at_eof(self) -> bool:
return self._eof and not self._buffer
def exception(self) -> Optional[BaseException]:
return self._exception
def set_exception(self, exc: BaseException) -> None:
self._eof = True
self._exception = exc
waiter = self._waiter
if waiter is not None:
set_exception(waiter, exc)
self._waiter = None
def feed_data(self, data: _T, size: int=0) -> None:
self._size += size
self._buffer.append((data, size))
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, True)
def feed_eof(self) -> None:
self._eof = True
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, False)
async def read(self) -> _T:
if not self._buffer and not self._eof:
assert not self._waiter
self._waiter = self._loop.create_future()
try:
await self._waiter
except (asyncio.CancelledError, asyncio.TimeoutError):
self._waiter = None
raise
if self._buffer:
data, size = self._buffer.popleft()
self._size -= size
return data
else:
if self._exception is not None:
raise self._exception
else:
raise EofStream
def __aiter__(self) -> AsyncStreamIterator[_T]:
return AsyncStreamIterator(self.read)
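# A minimal usage sketch for DataQueue (illustrative only; it assumes a
# running event loop and a producer driving feed_data()/feed_eof(), e.g.
# from a protocol callback):
#
#     queue = DataQueue(loop)        # type: DataQueue[bytes]
#     queue.feed_data(b'payload', 7)
#     queue.feed_eof()
#     data = await queue.read()      # -> b'payload'
#     # the next read() raises EofStream once the buffer is drained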
class FlowControlDataQueue(DataQueue[_T]):
"""FlowControlDataQueue resumes and pauses an underlying stream.
It is a destination for parsed data."""
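# Backpressure sketch: feed_data() pauses the protocol's reading once the
# buffered size exceeds 2 * limit, and read() resumes it as soon as the
# buffered size drops back below that same threshold.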
def __init__(self, protocol: BaseProtocol, *,
limit: int=DEFAULT_LIMIT,
loop: asyncio.AbstractEventLoop) -> None:
super().__init__(loop=loop)
self._protocol = protocol
self._limit = limit * 2
def feed_data(self, data: _T, size: int=0) -> None:
super().feed_data(data, size)
if self._size > self._limit and not self._protocol._reading_paused:
self._protocol.pause_reading()
async def read(self) -> _T:
try:
return await super().read()
finally:
if self._size < self._limit and self._protocol._reading_paused:
self._protocol.resume_reading()
|
|
"""Tests for the OAuth applications web API,."""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.utils import six
from djblets.db.query import get_object_or_none
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import DOES_NOT_EXIST
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.oauth.forms import ApplicationChangeForm
from reviewboard.oauth.models import Application
from reviewboard.site.models import LocalSite
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (oauth_app_item_mimetype,
oauth_app_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
from reviewboard.webapi.tests.urls import (get_oauth_app_item_url,
get_oauth_app_list_url)
def _compare_item(self, item_rsp, app):
self.assertEqual(item_rsp['authorization_grant_type'],
app.authorization_grant_type)
self.assertEqual(item_rsp['client_id'], app.client_id)
self.assertEqual(item_rsp['client_secret'], app.client_secret)
self.assertEqual(item_rsp['client_type'], app.client_type)
self.assertEqual(item_rsp['id'], app.pk)
self.assertEqual(item_rsp['name'], app.name)
if app.redirect_uris:
uris = {uri.strip() for uri in app.redirect_uris.split(',')}
else:
uris = set()
self.assertEqual(set(item_rsp['redirect_uris']), uris)
self.assertEqual(item_rsp['skip_authorization'], app.skip_authorization)
self.assertEqual(item_rsp['links']['user']['title'], app.user.username)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ExtraDataListMixin, BaseWebAPITestCase):
"""Testing the OAuthApplicationResource list APIs."""
resource = resources.oauth_app
sample_api_url = 'oauth-apps/'
fixtures = ['test_users']
compare_item = _compare_item
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
if populate_items:
if with_local_site:
local_site = LocalSite.objects.get(name=local_site_name)
else:
local_site = None
items = [
Application.objects.create(user=user, local_site=local_site),
]
else:
items = []
return (get_oauth_app_list_url(local_site_name=local_site_name),
oauth_app_list_mimetype,
items)
@add_fixtures(['test_site'])
@webapi_test_template
def test_get_filtered(self):
"""Testing the GET <URL> API only returns filtered applications"""
admin = User.objects.get(username='admin')
local_site = LocalSite.objects.get(pk=1)
applications = set(filter(
lambda a: a.local_site is None and a.user_id == self.user.pk,
self._make_applications([self.user, admin], local_site),
))
rsp = self.api_get(get_oauth_app_list_url(),
{},
expected_mimetype=oauth_app_list_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(applications,
self._applications_from_response(rsp['oauth_apps']))
@add_fixtures(['test_site'])
@webapi_test_template
def test_get_filtered_with_localsite(self):
"""Testing the GET <URL> API only returns filtered applications on a
LocalSite
"""
admin = User.objects.get(username='admin')
local_site = LocalSite.objects.get(pk=1)
local_site.users.add(self.user)
applications = self._make_applications(
users=[self.user, admin],
local_site=local_site,
predicate=lambda a: (a.local_site == local_site and
a.user == self.user),
)
rsp = self.api_get(get_oauth_app_list_url(local_site.name),
{},
expected_mimetype=oauth_app_list_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(applications,
self._applications_from_response(rsp['oauth_apps']))
@add_fixtures(['test_site'])
@webapi_test_template
def test_superuser_get(self):
"""Testing the GET <URL> API as a superuser"""
self.user = self._login_user(local_site=False, admin=True)
local_site = LocalSite.objects.get(pk=1)
doc = User.objects.get(username='doc')
applications = self._make_applications(
users=[self.user, doc],
local_site=local_site,
predicate=lambda a: a.local_site is None,
)
rsp = self.api_get(get_oauth_app_list_url(),
{},
expected_mimetype=oauth_app_list_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(applications,
self._applications_from_response(rsp['oauth_apps']))
@add_fixtures(['test_site'])
@webapi_test_template
def test_superuser_get_local_site(self):
"""Testing the GET <URL> API with a LocalSite as a superuser"""
self.user = self._login_user(local_site=False, admin=True)
local_site = LocalSite.objects.get(pk=1)
doc = User.objects.get(username='doc')
applications = self._make_applications(
users=[self.user, doc],
local_site=local_site,
predicate=lambda a: a.local_site == local_site,
)
rsp = self.api_get(get_oauth_app_list_url(local_site.name),
{},
expected_mimetype=oauth_app_list_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(applications,
self._applications_from_response(rsp['oauth_apps']))
def _applications_from_response(self, item_rsps):
"""Return the Application instances for the given item responses.
Args:
item_rsps (list):
The individual item responses.
Returns:
set of reviewboard.oauth.models.Application:
The matching applications.
"""
return set(Application.objects.filter(
pk__in=(item['id'] for item in item_rsps),
))
def _make_applications(self, users, local_site, predicate=None):
"""Create some applications for testing:
Args:
users (list of django.contrib.auth.models.User):
The users to create applications for.
local_site (reviewboard.site.models.LocalSite):
A LocalSite.
predicate (callable, optional):
An optional callable predicate to filter the results.
Returns:
set of reviewboard.oauth.models.Application:
The created applications.
"""
applications = set()
applications.update(
self.create_oauth_application(u, None, name='%s-app' % u.username)
for u in users
)
applications.update(
self.create_oauth_application(u, local_site,
name='%s-site-app' % u.username)
for u in users
)
if predicate:
applications = set(filter(predicate, applications))
return applications
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
if post_valid_data:
post_data = {
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
}
else:
post_data = {}
return (get_oauth_app_list_url(local_site_name),
oauth_app_item_mimetype,
post_data,
[])
def check_post_result(self, user, rsp):
app = Application.objects.get(pk=rsp['oauth_app']['id'])
self.compare_item(rsp['oauth_app'], app)
@webapi_test_template
def test_post_grant_implicit_no_uris(self):
"""Testing the POST <URL> API with GRANT_IMPLICIT and no URIs"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='',
grant_type=Application.GRANT_IMPLICIT,
is_valid=False,
)
@webapi_test_template
def test_post_grant_implicit_uris(self):
"""Testing the POST <URL> API with GRANT_IMPLICIT and URIs"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='https://example.com/',
grant_type=Application.GRANT_IMPLICIT,
is_valid=True,
)
@webapi_test_template
def test_post_grant_authorization_code_no_uris(self):
"""Testing the POST <URL> API with GRANT_AUTHORIZATION_CODE and no URIs
"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
is_valid=False,
)
@webapi_test_template
def test_post_grant_authorization_code_uris(self):
"""Testing the POST <URL> API with GRANT_AUTHORIZATION_CODE and URIs"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='http://example.com',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
is_valid=True,
)
@webapi_test_template
def test_post_grant_password_no_uris(self):
"""Testing the POST <URL> API with GRANT_PASSWORD and no URIs"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='',
grant_type=Application.GRANT_PASSWORD,
is_valid=True,
)
@webapi_test_template
def test_post_grant_password_uris(self):
"""Testing the POST <URL> API with GRANT_PASSWORD and URIs"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='http://example.com',
grant_type=Application.GRANT_PASSWORD,
is_valid=True,
)
@webapi_test_template
def test_post_grant_client_credentials_no_uris(self):
"""Testing the POST <URL> API with GRANT_CLIENT_CREDENTIALS and no URIs
"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='',
grant_type=Application.GRANT_CLIENT_CREDENTIALS,
is_valid=True,
)
@webapi_test_template
def test_post_grant_client_credentials_uris(self):
"""Testing the POST <URL> API with GRANT_CLIENT_CREDENTIALS and URIs"""
self._test_post_redirect_uri_grant_combination(
redirect_uris='http://example.com',
grant_type=Application.GRANT_CLIENT_CREDENTIALS,
is_valid=True,
)
@webapi_test_template
def test_post_set_user(self):
"""Testing the POST <URL> API with user set"""
rsp = self.api_post(
get_oauth_app_list_url(),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'user': 'doc',
},
expected_status=400,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('fields', rsp)
self.assertIn('user', rsp['fields'])
self.assertEqual(rsp['fields']['user'],
['You do not have permission to set this field.'])
@webapi_test_template
def test_post_set_user_as_superuser(self):
"""Testing the POST <URL> API as a superuser with user set"""
self._login_user(admin=True)
rsp = self.api_post(
get_oauth_app_list_url(),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'user': 'doc',
},
expected_mimetype=oauth_app_item_mimetype,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
app = Application.objects.get(pk=rsp['oauth_app']['id'])
self.compare_item(rsp['oauth_app'], app)
self.assertEqual(app.user.username, 'doc')
@webapi_test_template
def test_post_set_user_as_superuser_not_exists(self):
"""Testing the POST <URL> API as a superuser with user set as a
non-existent user
"""
self._login_user(admin=True)
rsp = self.api_post(
get_oauth_app_list_url(),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'user': 'foofoo',
},
expected_status=400,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('fields', rsp)
self.assertIn('user', rsp['fields'])
self.assertEqual(rsp['fields']['user'],
['The user "foofoo" does not exist.'])
@add_fixtures(['test_site'])
@webapi_test_template
def test_post_set_user_as_local_site_admin(self):
"""Testing the POST <URL> API as a LocalSite admin with user set"""
self._login_user(admin=True, local_site=True)
local_site = LocalSite.objects.get(name=self.local_site_name)
local_site.users.add(User.objects.get(username='dopey'))
rsp = self.api_post(
get_oauth_app_list_url(self.local_site_name),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'user': 'dopey',
},
expected_mimetype=oauth_app_item_mimetype,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
app = Application.objects.get(pk=rsp['oauth_app']['id'])
self.compare_item(rsp['oauth_app'], app)
self.assertEqual(app.user.username, 'dopey')
@add_fixtures(['test_site'])
@webapi_test_template
def test_post_set_user_as_local_site_admin_with_non_local_site_user(self):
"""Testing the POST <URL> API as a LocalSite admin with user set to a
non-LocalSite user
"""
self._login_user(admin=True, local_site=True)
rsp = self.api_post(
get_oauth_app_list_url(self.local_site_name),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'user': 'dopey',
},
expected_status=400,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('fields', rsp)
self.assertIn('user', rsp['fields'])
self.assertEqual(
rsp['fields']['user'],
['The user "dopey" does not exist.'],
)
@webapi_test_template
def test_post_set_skip_authorization(self):
"""Testing the POST <URL> API with skip_authorization set"""
rsp = self.api_post(
get_oauth_app_list_url(),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'skip_authorization': '1',
},
expected_status=400,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('fields', rsp)
self.assertIn('skip_authorization', rsp['fields'])
self.assertEqual(rsp['fields']['skip_authorization'],
['You do not have permission to set this field.'])
@webapi_test_template
def test_post_set_skip_authorization_as_superuser(self):
"""Testing the POST <URL> API as a superuser with skip_authorization"""
self._login_user(admin=True)
rsp = self.api_post(
get_oauth_app_list_url(),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'skip_authorization': '1',
},
expected_mimetype=oauth_app_item_mimetype,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
app = Application.objects.get(pk=rsp['oauth_app']['id'])
self.compare_item(rsp['oauth_app'], app)
self.assertEqual(app.skip_authorization, True)
@add_fixtures(['test_site'])
@webapi_test_template
def test_post_set_skip_authorization_as_local_site_admin(self):
"""Testing the POST <URL> API as a LocalSite admin with
skip_authorization set
"""
self._login_user(admin=True, local_site=True)
rsp = self.api_post(
get_oauth_app_list_url(self.local_site_name),
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-application',
'redirect_uris': 'https://example.com/oauth/',
'skip_authorization': '1',
},
expected_mimetype=oauth_app_item_mimetype,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
app = Application.objects.get(pk=rsp['oauth_app']['id'])
self.compare_item(rsp['oauth_app'], app)
self.assertEqual(app.skip_authorization, True)
def _test_post_redirect_uri_grant_combination(self, redirect_uris,
grant_type, is_valid):
"""Test the redirect_uris and grant type are valid or invalid.
Args:
redirect_uris (unicode):
A space-separated list of redirect URIs.
grant_type (unicode):
The grant type.
is_valid (bool):
Whether or not the given combination is valid. This determines
the testing done on the response.
"""
post_data = {
'authorization_grant_type': grant_type,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test-app',
'redirect_uris': redirect_uris,
'skip_authorization': '0',
}
if is_valid:
rsp = self.api_post(get_oauth_app_list_url(),
post_data,
expected_mimetype=oauth_app_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.compare_item(rsp['oauth_app'],
Application.objects.get(name='test-app'))
else:
rsp = self.api_post(get_oauth_app_list_url(),
post_data,
expected_status=400)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('err', rsp)
self.assertIn('fields', rsp)
self.assertIn('redirect_uris', rsp['fields'])
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ExtraDataItemMixin, BaseWebAPITestCase):
"""Testing the OAuthApplicationResource item APIs."""
resource = resources.oauth_app
sample_api_url = 'oauth-apps/<app-id>/'
fixtures = ['test_users']
not_owner_status_code = 404
not_owner_error = DOES_NOT_EXIST
compare_item = _compare_item
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
app = self.create_oauth_application(user,
with_local_site=with_local_site)
return (get_oauth_app_item_url(app.pk, local_site_name),
oauth_app_item_mimetype,
app)
@webapi_test_template
def test_get_without_owner(self):
"""Testing the GET <URL> API without owner"""
app = self.create_oauth_application(User.objects.get(username='admin'))
self.api_get(get_oauth_app_item_url(app.pk),
expected_status=404)
@webapi_test_template
def test_get_without_owner_as_superuser(self):
"""Testing the GET <URL> API without owner as superuser"""
self.user = self._login_user(admin=True)
app = self.create_oauth_application(User.objects.get(username='doc'))
rsp = self.api_get(get_oauth_app_item_url(app.pk),
expected_mimetype=oauth_app_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('oauth_app', rsp)
self.compare_item(rsp['oauth_app'], app)
@add_fixtures(['test_site'])
@webapi_test_template
def test_get_without_local_site(self):
"""Testing the GET <URL> API for an app related to a LocalSite"""
local_site = LocalSite.objects.get(pk=1)
local_site.users.add(self.user)
app = self.create_oauth_application(
self.user,
local_site=LocalSite.objects.get(pk=1))
rsp = self.api_get(get_oauth_app_item_url(app.pk),
expected_status=404)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
@add_fixtures(['test_site'])
@webapi_test_template
def test_get_with_invalid_local_site(self):
"""Testing the GET <URL> API with an app related to a LocalSite not
using the LocalSite's API
"""
local_site = LocalSite.objects.get(pk=1)
local_site.users.add(self.user)
app = self.create_oauth_application(self.user)
rsp = self.api_get(get_oauth_app_item_url(app.pk, local_site.name),
expected_status=404)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
@add_fixtures(['test_site'])
@webapi_test_template
def test_get_without_owner_as_local_site_admin(self):
"""Testing the GET <URL> API without owner on a LocalSite as a
LocalSite admin
"""
local_site = LocalSite.objects.get(pk=1)
local_site.users.add(self.user)
app = self.create_oauth_application(self.user, local_site=local_site)
self.user = self._login_user(admin=True, local_site=True)
rsp = self.api_get(get_oauth_app_item_url(app.pk, local_site.name),
expected_mimetype=oauth_app_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('oauth_app', rsp)
self.compare_item(rsp['oauth_app'], app)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
app = self.create_oauth_application(user,
with_local_site=with_local_site)
if put_valid_data:
request_data = {
'extra_data.fake_key': '',
}
else:
request_data = {
'user': 'admin',
}
return (get_oauth_app_item_url(app.pk, local_site_name),
oauth_app_item_mimetype,
request_data,
app,
[])
def check_put_result(self, user, item_rsp, app):
app = Application.objects.get(pk=app.pk)
self.compare_item(item_rsp, app)
@add_fixtures(['test_site'])
@webapi_test_template
def test_put_re_enable_security_disabled(self):
"""Testing the PUT <URL> API with enabled=1 for an application disabled
due to security
"""
self.user = self._login_user(admin=True)
doc = User.objects.get(username='doc')
local_site = LocalSite.objects.get(pk=1)
app = self.create_oauth_application(user=doc, local_site=local_site)
original_secret = app.client_secret
local_site.users.remove(doc)
app = Application.objects.get(pk=app.pk)
self.assertTrue(app.is_disabled_for_security)
self.assertEqual(app.user, self.user)
self.assertEqual(app.original_user, doc)
rsp = self.api_put(get_oauth_app_item_url(app.pk, local_site.name),
{'enabled': '1'},
expected_status=400)
app = Application.objects.get(pk=app.pk)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('fields', rsp)
self.assertIn('__all__', rsp['fields'])
self.assertEqual(rsp['fields']['__all__'][0],
ApplicationChangeForm.DISABLED_FOR_SECURITY_ERROR)
self.assertEqual(app.original_user, doc)
self.assertEqual(app.client_secret, original_secret)
@webapi_test_template
def test_put_regenerate_secret_key(self):
"""Testing the PUT <URL> API with regenerate_client_secret=1"""
app = self.create_oauth_application(user=self.user)
original_secret = app.client_secret
rsp = self.api_put(get_oauth_app_item_url(app.pk),
{'regenerate_client_secret': 1},
expected_mimetype=oauth_app_item_mimetype)
app = Application.objects.get(pk=app.pk)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.compare_item(rsp['oauth_app'], app)
self.assertNotEqual(app.client_secret, original_secret)
@add_fixtures(['test_site'])
@webapi_test_template
def test_put_regenerate_secret_key_enable(self):
"""Testing the PUT <URL> API with regenerate_secret_key=1 and enabled=1
"""
self.user = self._login_user(admin=True)
doc = User.objects.get(username='doc')
local_site = LocalSite.objects.get(pk=1)
app = self.create_oauth_application(user=doc, local_site=local_site)
original_secret = app.client_secret
local_site.users.remove(doc)
app = Application.objects.get(pk=app.pk)
self.assertTrue(app.is_disabled_for_security)
self.assertEqual(app.user, self.user)
self.assertEqual(app.original_user, doc)
rsp = self.api_put(
get_oauth_app_item_url(app.pk, local_site.name),
{
'enabled': '1',
'regenerate_client_secret': '1',
},
expected_mimetype=oauth_app_item_mimetype)
app = Application.objects.get(pk=app.pk)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
item_rsp = rsp['oauth_app']
self.compare_item(item_rsp, app)
self.assertNotEqual(item_rsp['client_secret'], original_secret)
self.assertFalse(app.is_disabled_for_security)
self.assertIsNone(app.original_user)
self.assertTrue(app.enabled)
self.assertNotEqual(app.client_secret, original_secret)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
app = self.create_oauth_application(user=user,
with_local_site=with_local_site)
return (get_oauth_app_item_url(app.pk, local_site_name),
[app.pk])
def check_delete_result(self, user, app_pk):
self.assertIsNone(get_object_or_none(Application, pk=app_pk))
|
|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for dnsmkusergroup
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import os
import sys
import socket
import threading
import time
import getpass
import unittest
import roster_core
import roster_server
from roster_user_tools import roster_client_lib
USER_CONFIG = 'test_data/roster_user_tools.conf'
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
HOST = u'localhost'
USERNAME = u'sharrell'
PASSWORD = u'test'
KEYFILE = 'test_data/dnsmgmt.key.pem'
CERTFILE = 'test_data/dnsmgmt.cert.pem'
CREDFILE = '%s/.dnscred' % os.getcwd()
EXEC = '../roster-user-tools/scripts/dnsmkusergroup'
class options(object):
password = u'test'
username = u'sharrell'
server = None
ldap = u'ldaps://ldap.cs.university.edu:636'
credfile = CREDFILE
view_name = None
ip_address = None
target = u'machine1'
ttl = 64
class DaemonThread(threading.Thread):
def __init__(self, config_instance, port):
threading.Thread.__init__(self)
self.config_instance = config_instance
self.port = port
self.daemon_instance = None
def run(self):
self.daemon_instance = roster_server.Server(self.config_instance, KEYFILE,
CERTFILE)
self.daemon_instance.Serve(port=self.port)
class Testdnsmkusergroup(unittest.TestCase):
def setUp(self):
def PickUnusedPort():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, 0))
addr, port = s.getsockname()
s.close()
return port
self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
db_instance = self.config_instance.GetDb()
db_instance.CreateRosterDatabase()
data = open(DATA_FILE, 'r').read()
db_instance.StartTransaction()
db_instance.cursor.execute(data)
db_instance.EndTransaction()
db_instance.close()
self.port = PickUnusedPort()
self.server_name = 'https://%s:%s' % (HOST, self.port)
self.daemon_thread = DaemonThread(self.config_instance, self.port)
self.daemon_thread.daemon = True
self.daemon_thread.start()
self.core_instance = roster_core.Core(USERNAME, self.config_instance)
self.password = 'test'
time.sleep(1)
roster_client_lib.GetCredentials(USERNAME, u'test', credfile=CREDFILE,
server_name=self.server_name)
def tearDown(self):
if( os.path.exists(CREDFILE) ):
os.remove(CREDFILE)
def testMakeUserGroupUserGroupAssignments(self):
output = os.popen('python %s user -n new_user '
'-a 128 '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'ADDED USER: username: new_user access_level: 128\n')
output.close()
output = os.popen('python %s assignment -n new_user -g cs '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'ADDED USER_GROUP_ASSIGNMENT: username: new_user group: cs\n')
output.close()
output = os.popen('python %s assignment -n new_user -g cs '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'CLIENT ERROR: User-Group assignment "new_user-cs" already exists\n')
output.close()
self.assertEqual(self.core_instance.ListUsers(),
{u'shuey': 64, u'new_user': 128, u'jcollins': 32,
u'tree_export_user': 0, u'sharrell': 128})
self.assertEqual(self.core_instance.ListGroups(), [u'bio', u'cs', u'eas'])
self.assertEqual(self.core_instance.ListUserGroupAssignments(),
{u'shuey': [u'bio', u'cs'], u'new_user': [u'cs'],
u'sharrell': [u'cs']})
def testMakeUserGroupUserGroupAssignmentsWithStringAccessLevel(self):
output = os.popen('python %s user -n new_dns_admin '
'-a dns_admin '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'ADDED USER: username: new_dns_admin access_level: 128\n')
output.close()
output = os.popen('python %s user -n new_unlocked_user '
'-a unlocked_user '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'ADDED USER: username: new_unlocked_user access_level: 64\n')
output.close()
output = os.popen('python %s user -n new_user '
'-a user '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'ADDED USER: username: new_user access_level: 32\n')
output.close()
output = os.popen('python %s assignment -n new_user -g cs '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'ADDED USER_GROUP_ASSIGNMENT: username: new_user group: cs\n')
output.close()
self.assertEqual(self.core_instance.ListUsers(),
{u'shuey': 64, u'jcollins': 32,
u'tree_export_user': 0, u'sharrell': 128, u'new_dns_admin': 128,
u'new_unlocked_user': 64, u'new_user': 32})
self.assertEqual(self.core_instance.ListGroups(), [u'bio', u'cs', u'eas'])
self.assertEqual(self.core_instance.ListUserGroupAssignments(),
{u'shuey': [u'bio', u'cs'], u'new_user': [u'cs'],
u'sharrell': [u'cs']})
def testMakeUserWithZone(self):
self.core_instance.MakeZone(u'test_zone', u'master', u'here.')
output = os.popen('python %s group -g testgroup '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name,
USERNAME, PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED GROUP: group: testgroup\n')
output.close()
output = os.popen('python %s user -n new_user -a 128 '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name,
USERNAME, PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED USER: username: new_user access_level: 128\n')
output.close()
output = os.popen('python %s assignment -n new_user -g testgroup '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name,
USERNAME, PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED USER_GROUP_ASSIGNMENT: username: new_user group: testgroup\n')
output.close()
output = os.popen('python %s forward -z test_zone -g testgroup '
'--group-permission a,aaaa,cname '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name,
USERNAME, PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED FORWARD_ZONE_PERMISSION: zone_name: test_zone group: '
'testgroup group_permission: [\'a\', \'aaaa\', \'cname\']\n')
output.close()
self.assertEqual(self.core_instance.ListForwardZonePermissions(),
{u'bio': [{'zone_name': u'bio.university.edu',
'group_permission': [u'a', u'aaaa']}],
u'testgroup': [{'zone_name': u'test_zone',
'group_permission': [u'a', u'aaaa',
u'cname']}],
u'cs': [{'zone_name': u'cs.university.edu',
'group_permission': [u'a', u'aaaa', u'cname',
u'ns', u'soa']},
{'zone_name': u'eas.university.edu',
'group_permission': [u'a', u'aaaa',
u'cname']}]})
output = os.popen('python %s user -n newuser --access-level 128 '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED USER: username: newuser access_level: 128\n')
output.close()
output = os.popen('python %s assignment -n newuser -g testgroup '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED USER_GROUP_ASSIGNMENT: username: newuser group: testgroup\n')
output.close()
output = os.popen('python %s reverse -g testgroup -z test_zone '
'-b 192.168.1.4/30 --group-permission cname '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'CLIENT ERROR: The -z/--zone-name flag cannot be used with the reverse '
'command.\n')
output.close()
output = os.popen('python %s reverse -g testgroup '
'-b 192.168.1.4/30 --group-permission cname,ptr '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED REVERSE_RANGE_PERMISSION: cidr_block: 192.168.1.4/30 '
'group: testgroup group_permission: [\'cname\', \'ptr\']\n')
output.close()
self.assertEqual(self.core_instance.ListReverseRangePermissions(),
{u'bio':
[{'cidr_block': u'192.168.0.0/24',
'group_permission': [u'cname', u'ptr']},
{'cidr_block': u'192.168.1.0/24',
'group_permission': [u'ptr']}],
u'testgroup':
[{'cidr_block': u'192.168.1.4/30',
'group_permission': [u'cname', u'ptr']}],
u'cs': [{'cidr_block': u'192.168.0.0/24',
'group_permission': [u'cname', u'ns', u'ptr',
u'soa']}]})
def testMakeZoneAssignments(self):
self.core_instance.MakeGroup(u'test_group')
self.core_instance.MakeZone(u'test_zone', u'master', u'here.')
output = os.popen('python %s reverse -z test_zone -b '
'192.168.1.0/24 -g test_group --group-permission '
'cname,ptr -s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'CLIENT ERROR: The -z/--zone-name flag cannot be used with the reverse '
'command.\n')
output.close()
output = os.popen('python %s reverse -b 192.168.1.0/24 '
'-g test_group --group-permission cname,ptr,ns '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(
output.read(),
'ADDED REVERSE_RANGE_PERMISSION: cidr_block: 192.168.1.0/24 '
'group: test_group group_permission: [\'cname\', \'ptr\', \'ns\']\n')
output.close()
self.assertEqual(self.core_instance.ListReverseRangePermissions(),
{u'bio':
[{'cidr_block': u'192.168.0.0/24',
'group_permission': [u'cname', u'ptr']},
{'cidr_block': u'192.168.1.0/24',
'group_permission': [u'ptr']}],
u'test_group':
[{'cidr_block': u'192.168.1.0/24',
'group_permission': [u'cname', u'ptr', u'ns']}],
u'cs':
[{'cidr_block': u'192.168.0.0/24',
'group_permission': [u'cname', u'ns', u'ptr',
u'soa']}]})
def testMakeGroup(self):
output = os.popen('python %s group -g test_group '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(), 'ADDED GROUP: group: test_group\n')
output.close()
self.assertEqual(self.core_instance.ListGroups(), [u'bio', u'cs',
u'eas', u'test_group'])
def testErrors(self):
output = os.popen('python %s user -n jcollins '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'CLIENT ERROR: The -a/--access-level flag is required.\n')
output.close()
output = os.popen('python %s user -n dchayes '
'-s %s -u %s -a super-user -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'ERROR: KeyError: \'super-user\'\n')
output.close()
output = os.popen('python %s user -n jcollins -g cs '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'CLIENT ERROR: The -g/--group flag cannot be used with the user '
'command.\n')
output.close()
output = os.popen('python %s user -n jcollins '
'-a 128 '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'CLIENT ERROR: Username already exists.\n')
output.close()
output = os.popen('python %s assignment -n newuser '
'-g fakegroup '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(), 'CLIENT ERROR: Group does not exist.\n')
output.close()
self.core_instance.MakeZone(u'test_zone', u'master', u'here.')
self.core_instance.MakeGroup(u'testgroup')
output = os.popen('python %s forward '
'-g testgroup -z test_zone --group-permission x '
'-s %s -u %s -p %s --config-file %s' % (
EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'USER ERROR: Invalid data type GroupPermission for '
'group_forward_permissions_group_permission: x\n')
output.close()
# check duplicate group permission assignment
output = os.popen('python %s forward -z test_zone -g testgroup '
'--group-permission soa,ns,soa -s %s -u %s -p %s '
'--config-file %s' % (EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(), 'CLIENT ERROR: Duplicate permission: soa\n')
output.close()
# also check duplicate reverse range group permission
output = os.popen('python %s reverse -b 192.168.0.1/24 -g testgroup '
'--group-permission soa,ptr,soa -s %s -u %s -p %s '
'--config-file %s' % (EXEC, self.server_name, USERNAME,
PASSWORD, USER_CONFIG))
self.assertEqual(output.read(),
'CLIENT ERROR: Duplicate permission found: soa\n')
output.close()
if( __name__ == '__main__' ):
unittest.main()
|
|
from __future__ import print_function, unicode_literals
from six.moves.http_client import HTTPMessage
from six.moves.urllib_parse import urlparse
from copy import deepcopy
from .content import decoders
from .messages import split_start_line, HTTPRequest, HTTPResponse
from .encapsulated import encapsulated_offsets
from .response import (BadComposition,
RequestURITooLong,
NoModificationsNeeded)
IEOF = object()
CRLF = b'\r\n'
# Maximum from BaseHTTPServer seems reasonable
MAX_REQUEST_LEN = 65536
class ChunkError(Exception):
""" Something is wrong with the chunks """
class LineReader(object):
""" Reads lines and keeps track of offset """
def __init__(self, raw):
self.raw = raw
self.offset = 0
def readline(self, *args, **kwargs):
line = self.raw.readline(*args, **kwargs)
self.offset += len(line)
return line
class ICAPRequest(HTTPMessage):
def __init__(self, rfile, method, uri, protocol):
HTTPMessage.__init__(self, rfile)
self.method = method
self.uri = uri
parsed_uri = urlparse(uri)
self.abs_path = parsed_uri.path
self.protocol = protocol
self.preview = self.get('preview') and int(self.get('preview'))
self.http_request = None
self.http_response = None
self.preview_chunks = []
self.send_continue_after_preview = None
self.null_body = True
self.eof = False
def content_decoder(self):
if self.http_response:
encoding = self.http_response.get('content-encoding', 'identity')
else:
encoding = self.http_request.get('content-encoding', 'identity')
return decoders[encoding]
@property
def close_connection(self):
return self.get('connection', '').lower().strip() == 'close'
def unmodified(self):
# XXX: check for self.preview is None and then return OK(..., chunks=self.chunks)?
self.eof = True
return NoModificationsNeeded()
def modify_http_request(self, decode=True):
# we cannot copy the `fp`
del self.http_request.fp
http_request = deepcopy(self.http_request)
if decode:
decoder = self.content_decoder()
http_request['content-encoding'] = 'identity'
chunks = decoder(self.chunks)
else:
chunks = self.chunks
return http_request, chunks
def modify_http_response(self, decode=True):
# we cannot copy the `fp`
del self.http_response.fp
http_response = deepcopy(self.http_response)
# remove content-length for modified responses
del http_response['content-length']
if decode:
decoder = self.content_decoder()
http_response['content-encoding'] = 'identity'
chunks = decoder(self.chunks)
else:
chunks = self.chunks
return http_response, chunks
@classmethod
def parse(cls, rfile, send_continue_after_preview=None):
line = rfile.readline(MAX_REQUEST_LEN + 1)
if not line:
return None
if len(line) > MAX_REQUEST_LEN:
raise RequestURITooLong()
method, uri, protocol = split_start_line(line)
request = cls(rfile, method, uri, protocol)
request.send_continue_after_preview = send_continue_after_preview
request.read_encapsulated_http(rfile)
request.read_preview()
request.chunks = request._chunks()
return request
def read_encapsulated_http(self, rfile):
encapsulated = self.get('encapsulated')
if encapsulated is None:
self.eof = True
self.null_body = True
return
reader = LineReader(rfile)
for name, offset in encapsulated_offsets(encapsulated):
if name.endswith('-body'):
if reader.offset != offset:
reason = "offset '%s' (%d != %d)" % (name, offset, reader.offset)
raise BadComposition(reason=reason)
if name == 'null-body':
self.eof = True
else:
self.null_body = False
elif name == 'req-hdr':
self.http_request = HTTPRequest.parse(reader)
elif name == 'res-hdr':
self.http_response = HTTPResponse.parse(reader)
def read_preview(self):
size = self.get('preview')
if size is None:
return
self.preview_chunks = []
if self.eof:
return
for chunk in read_chunks(self.fp):
if chunk is not IEOF:
assert not self.eof
self.preview_chunks.append(chunk)
else:
self.eof = True
def continue_after_preview(self):
if self.eof:
return False
if self.send_continue_after_preview and self.preview is not None:
self.send_continue_after_preview()
self.send_continue_after_preview = False
return True
def _chunks(self):
for chunk in self.preview_chunks:
yield chunk
if not self.continue_after_preview():
return
for chunk in read_chunks(self.fp):
if chunk is IEOF:
raise ChunkError("ieof after preview")
yield chunk
def read_chunks(rfile):
readline = rfile.readline
read = rfile.read
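# Each chunk is framed as "<hex size>[; extension]\r\n<data>\r\n". The
# "ieof" chunk extension is ICAP's early-EOF marker for previews and is
# only legal on a zero-sized chunk (see the ChunkError raised below).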
while True:
chunk_size_line = readline()
chunk_size, sep, chunk_extension = chunk_size_line.partition(';')
chunk_size = int(chunk_size, 16)
if sep == ';':
if chunk_extension.strip() == 'ieof':
if chunk_size != 0:
raise ChunkError("ieof with non-zero size")
yield IEOF
chunk = read(chunk_size)
crlf = read(2)
if crlf != CRLF:
raise ChunkError("found %r expecting CRLF" % crlf)
if not chunk:
break
yield chunk
|
|
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
#### time parameters ####
#########################
T = 100
R = 1
#### model parameters ####
##########################
Np = 200
weight_limit = 1 / (Np ** 3.0)
ess = 10.0 / Np
# nu = 1.0 / (2 ** Np)
phi = 0.23
sigma = 0.1
vol = [-2.5, -0.5]
p00 = 0.95
p11 = 0.01
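# Model sketch (as implied by generate_states()/generate_path() below): a
# two-regime Markov-switching stochastic-volatility model in which the regime
# j in {0, 1} evolves with stay probabilities p00/p11, the log-volatility
# follows h_{t+1} = vol[j] + phi * h_t + sigma * eps_t, and the observation
# y_t is drawn from Normal(0, exp(h_t / 2)) with exp(h_t / 2) as the standard
# deviation.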
#### Utils ####
###############
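# p_ess() is the usual effective-sample-size estimate 1 / sum(w_i^2); g_ess()
# is a cruder 1 / max(w_i) proxy whose reciprocal (the largest normalized
# weight) is what triggers resampling in forward_sir_filter() below.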
def p_ess(particles):
return 1 / sum(np.square(particles["weights"]))
def g_ess(particles):
return 1 / (max(particles["weights"]))
def markov_switch(i, p00, p11):
u = np.random.uniform()
if (i == 0):
if u <= p00:
return 0
else:
return 1
else:
if u <= p11:
return 1
else:
return 0
def generate_states(ht, j):
h_tp1 = np.random.normal(loc=vol[j] + phi * ht, scale=sigma)
y_tp1 = np.random.normal(loc=0.0, scale=np.exp(h_tp1 / 2))
return [h_tp1, y_tp1]
def generate_path(T):
path = {"state": [], "obs": []}
H = [np.random.normal(loc=vol[0],scale=sigma)]
Y = []
j = 0
for t in range(T):
j = markov_switch(j, p00, p11)
state = generate_states(H[t], j)
H.append(state[0])
Y.append(state[1])
path["state"] = H
path["obs"] = Y
return path
#### SIR Particle Filter ####
#############################
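# The SIR loop below follows the usual pattern: propagate each particle
# through the regime switch and the AR(1) volatility transition (prediction),
# reweight by the observation likelihood (update_weights), and resample with
# replacement whenever the largest normalized weight crosses the `ess`
# threshold defined above (resampling).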
def process(ht, j):
h_tp1 = vol[j] + phi * ht
return h_tp1
def prediction(particles, Np):
for i in range(Np):
particles["switch"][i] = markov_switch(particles["switch"][i], p00, p11)
particles["p_ant"][i] = particles["p"][i]
particles["p"][i] = np.random.normal(loc=process(particles["p_ant"][i], particles["switch"][i]), scale=sigma)
return particles
def update_weights(particles, observation, Np):
for i in range(Np):
p = particles["p"][i]
particles["weights"][i] = norm.pdf(observation, 0, np.exp(p))
if (particles["weights"][i] < weight_limit):
particles["weights"][i] = weight_limit
particles["weights"] /= sum(particles["weights"])
return particles
def resampling(particles, Np):
indexes = np.random.choice(Np, Np, p=particles["weights"])
Zt = particles["p"][:]
for i in range(Np):
particles["p"][i] = Zt[indexes[i]]
particles["weights"][i] = 1.0 / Np
particles = prediction(particles, Np)
return particles
def init_forward(m0, s0, Np):
particles = {"p": [], "p_ant": [], "weights": [], "switch": []}
h0 = np.random.normal(m0, s0, Np)
w = []
for i in range(Np):
w.append(1.0 / Np)
particles["p"].append(h0[i])
particles["p_ant"].append(0)
particles["switch"].append(0)
particles["weights"] = np.array(w)
return particles
def forward_sir_filter(m0, s0, path, T):
particles = init_forward(m0, s0, Np)
P = [particles]
r = 0
for t in range(0, T, 1):
obs = path["obs"][t]
p = {}
particles = prediction(particles, Np)
particles = update_weights(particles, obs, Np)
p["weights"] = particles["weights"][:]
if 1.0 / g_ess(particles) >= ess:
r = r + 1
particles = resampling(particles, Np)
p["p"] = particles["p"][:]
p["p_ant"] = particles["p_ant"][:]
P.append(p)
print("forward resampling rate: " + str(r * 100 / T) + "%")
return P
#### Auxiliary Particle Filter ####
###################################
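# Auxiliary particle filter sketch: mean_apf() computes a look-ahead mean for
# each particle, index_apf() draws first-stage ancestor indices weighted by
# the predictive likelihood evaluated at that mean, and update_weights_apf()
# applies the second-stage correction (likelihood at the propagated particle
# divided by likelihood at the look-ahead mean).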
def mean_apf(particles):
for i in range(Np):
particles["switch"][i] = markov_switch(particles["switch"][i], p00, p11)
particles["mean"][i] = vol[particles["switch"][i]] + phi * particles["p"][i]
return particles
def index_apf(particles, observation, Np):
indexes = []
for i in range(Np):
w_obs = norm.pdf(observation, 0, np.exp(particles["mean"][i] / 2))
if w_obs < weight_limit:
w_obs = weight_limit
prob = particles["weights"] * w_obs
prob /= sum(prob)
indexes.append(np.random.choice(Np, 1, p=prob)[0])
return indexes
def process_apf(ht, j):
h_tp1 = vol[j] + phi * ht
return h_tp1
def prediction_apf(particles, Np, indexes):
for i in range(Np):
particles["p_ant"][i] = particles["p"][i]
particles["p"][i] = np.random.normal(loc=process_apf(particles["p_ant"][indexes[i]], particles["switch"][i]), scale=sigma)
return particles
def update_weights_apf(particles, observation, Np, indexes):
for i in range(Np):
p = particles["p"][i]
m = particles["mean"][indexes[i]]
particles["weights"][i] = norm.pdf(observation, 0, np.exp(p)) / norm.pdf(observation, 0, np.exp(m))
if (particles["weights"][i] < weight_limit):
particles["weights"][i] = weight_limit
particles["weights"] /= sum(particles["weights"])
return particles
def init_forward_apf(m0, s0, Np):
particles = {"p": [], "p_ant": [], "weights": [], "switch": [], "mean":[]}
h0 = np.random.normal(m0, s0, Np)
w = []
for i in range(Np):
w.append(1.0 / Np)
particles["p"].append(h0[i])
particles["p_ant"].append(0)
particles["switch"].append(0)
particles["mean"].append(h0[i])
particles["weights"] = np.array(w)
return particles
def forward_aux_filter(m0, s0, path, T):
particles = init_forward_apf(m0, s0, Np)
P = [particles]
for t in range(0, T, 1):
obs = path["obs"][t]
p = {}
particles = mean_apf(particles)
indexes = index_apf(particles, obs, Np)
particles = prediction_apf(particles, Np, indexes)
particles = update_weights_apf(particles, obs, Np, indexes)
p["weights"] = particles["weights"][:]
p["p"] = particles["p"][:]
p["p_ant"] = particles["p_ant"][:]
P.append(p)
return P
#### Particle Smoother ####
###########################
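# Despite the name, particle_smoother() below simply reports the filtered
# weighted mean sum_i(w_i * x_i) from the auxiliary filter at each time step;
# no backward smoothing pass is performed.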
def particle_smoother(m0, s0, path, T):
p_forward = forward_aux_filter(m0, s0, path, T)
Xest = []
for t in range(2, T, 1):
p_f = p_forward[t - 1]
w_f = p_forward[t - 1]["weights"]
xsum = np.multiply(p_f["p"], w_f)  # weighted mean over all particles
Xest.append(sum(xsum))
return Xest
#### Simulations ####
#####################
path = generate_path(T)
X = np.transpose(path["state"])
Y = np.transpose(path["obs"])
Xest = np.zeros((R, T - 2))
Mest = np.zeros((R, T - 3))
for i in range(R):
print("Run " + str(i + 1))
Xest[i] = particle_smoother(0, 0.01, path, T)
linex, = plt.plot(range(T - 2), X[2:T], label="X")
liney, = plt.plot(range(T - 2), Y[2:T], label="Y")
linexest, = plt.plot(range(T - 2), np.mean(Xest, axis=0), label="Xest")
plt.xlabel("Time")
plt.ylabel("Value")
plt.legend(handles=[linexest, linex, liney])
plt.show()
|
|
#!/usr/bin/env python3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parser for linker map files.
The format of a linker map file depends on the linker that generates it. This
file uses "coded linker name" to identify formats and variants:
'gold': The gold linker (usage is being deprecated by Chrome).
'lld_v0': LLD linker (no LTO), old format.
'lld-lto_v0': LLD linker with ThinLTO, old format.
'lld_v1': LLD linker (no LTO), new format.
'lld-lto_v1': LLD linker with ThinLTO, new format.
The |linker_name| parameter in various functions must take one of the above
coded linker name values.
"""
import argparse
import code
import collections
import itertools
import logging
import os
import re
import readline
import sys
import demangle
import models
# About linker maps:
# * "Discarded input sections" include symbols merged with other symbols
# (aliases), so the information there is not actually a list of unused things.
# * Linker maps include symbols that do not have names (with object path),
# whereas "nm" skips over these (they don't account for much though).
# * The parse time for compressed linker maps is dominated by ungzipping.
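# Number of leading characters to strip from a symbol name in order to drop
# its section-derived prefix, keyed by the flag assigned in
# _FlagsFromMangledName() below (e.g. len('startup.') == 8,
# len('unlikely.') == 9, len('rel.local.') == 10, len('rel.') == 4,
# len('hot.') == 4).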
_STRIP_NAME_PREFIX = {
models.FLAG_STARTUP: 8,
models.FLAG_UNLIKELY: 9,
models.FLAG_REL_LOCAL: 10,
models.FLAG_REL: 4,
models.FLAG_HOT: 4,
}
def _FlagsFromMangledName(name):
# Currently, lld map files have section = '.text.startup' and put the symbol
# name in the section break-down ("level 3 symbols").
if name.startswith('startup.') or name == 'startup':
return models.FLAG_STARTUP
if name.startswith('unlikely.'):
return models.FLAG_UNLIKELY
if name.startswith('rel.local.'):
return models.FLAG_REL_LOCAL
if name.startswith('rel.'):
return models.FLAG_REL
if name.startswith('hot.'):
return models.FLAG_HOT
return 0
def _NormalizeName(name):
# Outlined functions have names like OUTLINED_FUNCTION_0, which can
# appear 1000+ times and can cause false aliasing. We treat these as
# special cases by designating them as placeholder symbols and
# renaming them to '** outlined function'.
if name.startswith('OUTLINED_FUNCTION_'):
return '** outlined function'
if name.startswith('.L.str'):
return models.STRING_LITERAL_NAME
if name.endswith(' (.cfi)'):
return name[:-7]
return name
class MapFileParserGold(object):
"""Parses a linker map file from gold linker."""
# Map file writer for gold linker:
# https://github.com/gittup/binutils/blob/HEAD/gold/mapfile.cc
def __init__(self):
self._common_symbols = []
self._symbols = []
self._section_ranges = {}
self._lines = None
def Parse(self, lines):
"""Parses a linker map file.
Args:
lines: Iterable of lines, the first of which has been consumed to
identify file type.
Returns:
A tuple of (section_ranges, symbols, extras).
"""
self._lines = iter(lines)
logging.debug('Scanning for Header')
while True:
line = self._SkipToLineWithPrefix('Common symbol', 'Memory map')
if line.startswith('Common symbol'):
self._common_symbols = self._ParseCommonSymbols()
logging.debug('.bss common entries: %d', len(self._common_symbols))
continue
elif line.startswith('Memory map'):
self._ParseSections()
break
return self._section_ranges, self._symbols, {}
def _SkipToLineWithPrefix(self, prefix, prefix2=None):
for l in self._lines:
if l.startswith(prefix) or (prefix2 and l.startswith(prefix2)):
return l
def _ParsePossiblyWrappedParts(self, line, count):
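# Splits |line| into |count| whitespace-separated fields. Gold wraps long
# symbol names onto their own line, so when fewer than |count| fields are
# present the remainder is read from the following line.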
parts = line.split(None, count - 1)
if not parts:
return None
if len(parts) != count:
line = next(self._lines)
parts.extend(line.split(None, count - len(parts) - 1))
assert len(parts) == count, 'parts: ' + ' '.join(parts)
parts[-1] = parts[-1].rstrip()
return parts
def _ParseCommonSymbols(self):
# Common symbol size file
#
# ff_cos_131072 0x40000 obj/third_party/<snip>
# ff_cos_131072_fixed
# 0x20000 obj/third_party/<snip>
ret = []
next(self._lines) # Skip past blank line
name, size_str, path = None, None, None
for l in self._lines:
parts = self._ParsePossiblyWrappedParts(l, 3)
if not parts:
break
name, size_str, path = parts
sym = models.Symbol(
models.SECTION_BSS,
int(size_str[2:], 16),
full_name=name,
object_path=path)
ret.append(sym)
return ret
def _ParseSections(self):
# .text 0x0028c600 0x22d3468
# .text.startup._GLOBAL__sub_I_bbr_sender.cc
# 0x0028c600 0x38 obj/net/net/bbr_sender.o
# .text._reset 0x00339d00 0xf0 obj/third_party/icu/icuuc/ucnv.o
# ** fill 0x0255fb00 0x02
# .text._ZN4base8AutoLockD2Ev
# 0x00290710 0xe obj/net/net/file_name.o
# 0x00290711 base::AutoLock::~AutoLock()
# 0x00290711 base::AutoLock::~AutoLock()
# .text._ZNK5blink15LayoutBlockFlow31mustSeparateMarginAfterForChildERK...
# 0xffffffffffffffff 0x46 obj/...
# 0x006808e1 blink::LayoutBlockFlow::...
# .text.OUTLINED_FUNCTION_0
# 0x002a2000 0x20 obj/net/net/tag.o
# .bss
# .bss._ZGVZN11GrProcessor11initClassIDI10LightingFPEEvvE8kClassID
# 0x02d4b294 0x4 obj/skia/skia/SkLightingShader.o
# 0x02d4b294 guard variable for void GrProcessor::ini...
# .data 0x0028c600 0x22d3468
# .data.rel.ro._ZTVN3gvr7android19ScopedJavaGlobalRefIP12_jfloatArrayEE
# 0x02d1e668 0x10 ../third_party/.../libfoo.a(bar.o)
# 0x02d1e668 vtable for gvr::android::GlobalRef<_jflo...
# ** merge strings
# 0x0255fb00 0x1f2424
# ** merge constants
# 0x0255fb00 0x8
# ** common 0x02db5700 0x13ab48
syms = self._symbols
while True:
line = self._SkipToLineWithPrefix('.')
if not line:
break
section_name = None
try:
# Parse section name and size.
parts = self._ParsePossiblyWrappedParts(line, 3)
if not parts:
break
section_name, section_address_str, section_size_str = parts
section_address = int(section_address_str[2:], 16)
section_size = int(section_size_str[2:], 16)
self._section_ranges[section_name] = (section_address, section_size)
if (section_name in models.BSS_SECTIONS
or section_name in (models.SECTION_RODATA, models.SECTION_TEXT)
or section_name.startswith(models.SECTION_DATA)):
logging.info('Parsing %s', section_name)
if section_name in models.BSS_SECTIONS:
# Common symbols have no address.
syms.extend(self._common_symbols)
prefix_len = len(section_name) + 1 # + 1 for the trailing .
symbol_gap_count = 0
merge_symbol_start_address = section_address
sym_count_at_start = len(syms)
line = next(self._lines)
# Parse section symbols.
while True:
if not line or line.isspace():
break
if line.startswith(' **'):
zero_index = line.find('0')
if zero_index == -1:
# Line wraps.
name = line.strip()
line = next(self._lines)
else:
# Line does not wrap.
name = line[:zero_index].strip()
line = line[zero_index:]
address_str, size_str = self._ParsePossiblyWrappedParts(line, 2)
line = next(self._lines)
# These bytes are already accounted for.
if name == '** common':
continue
address = int(address_str[2:], 16)
size = int(size_str[2:], 16)
path = None
sym = models.Symbol(section_name, size, address=address,
full_name=name, object_path=path)
syms.append(sym)
if merge_symbol_start_address > 0:
merge_symbol_start_address += size
else:
# A normal symbol entry.
subsection_name, address_str, size_str, path = (
self._ParsePossiblyWrappedParts(line, 4))
size = int(size_str[2:], 16)
assert subsection_name.startswith(section_name), (
'subsection name was: ' + subsection_name)
mangled_name = subsection_name[prefix_len:]
name = None
address_str2 = None
while True:
line = next(self._lines).rstrip()
if not line or line.startswith(' .'):
break
# clang includes ** fill, but gcc does not.
if line.startswith(' ** fill'):
# Alignment explicitly recorded in map file. Rather than
# record padding based on these entries, we calculate it
# using addresses. We do this because fill lines are not
# present when compiling with gcc (only for clang).
continue
elif line.startswith(' **'):
break
elif name is None:
address_str2, name = self._ParsePossiblyWrappedParts(line, 2)
if address_str == '0xffffffffffffffff':
# The section needs special handling (e.g., a merge section).
# It also generally has a large offset after it, so don't
# penalize the subsequent symbol for this gap (e.g. a 50kb gap).
# There seems to be no correlation between where these gaps occur
# and the symbols they fall in between.
# TODO(agrieve): Learn more about why this happens.
if address_str2:
address = int(address_str2[2:], 16) - 1
elif syms and syms[-1].address > 0:
# Merge sym with no second line showing real address.
address = syms[-1].end_address
else:
logging.warning('First symbol of section had address -1')
address = 0
merge_symbol_start_address = address + size
else:
address = int(address_str[2:], 16)
# Finish off active address gap / merge section.
if merge_symbol_start_address:
merge_size = address - merge_symbol_start_address
merge_symbol_start_address = 0
if merge_size > 0:
# merge_size == 0 for the initial symbol generally.
logging.debug('Merge symbol of size %d found at:\n %r',
merge_size, syms[-1])
# Set size=0 so that it will show up as padding.
sym = models.Symbol(
section_name, 0,
address=address,
full_name='** symbol gap %d' % symbol_gap_count)
symbol_gap_count += 1
syms.append(sym)
# .text.res_findResource_60
# 0x00178de8 0x12a obj/...
# 0x00178de9 res_findResource_60
# .text._ZN3url6ParsedC2Ev
# 0x0021ad62 0x2e obj/url/url/url_parse.o
# 0x0021ad63 url::Parsed::Parsed()
# .text.unlikely._ZN4base3CPUC2Ev
# 0x003f9d3c 0x48 obj/base/base/cpu.o
# 0x003f9d3d base::CPU::CPU()
full_name = name or mangled_name
if mangled_name and (not name or mangled_name.startswith('_Z') or
'._Z' in mangled_name):
full_name = mangled_name
flags = _FlagsFromMangledName(mangled_name)
if full_name:
if flags:
full_name = full_name[_STRIP_NAME_PREFIX[flags]:]
else:
full_name = _NormalizeName(full_name)
sym = models.Symbol(section_name, size, address=address,
full_name=full_name, object_path=path,
flags=flags)
syms.append(sym)
logging.debug('Symbol count for %s: %d', section_name,
len(syms) - sym_count_at_start)
except:
logging.error('Problem line: %r', line)
logging.error('In section: %r', section_name)
raise
class MapFileParserLld(object):
"""Parses a linker map file from LLD."""
# Map file writer for LLD linker (for ELF):
# https://github.com/llvm-mirror/lld/blob/HEAD/ELF/MapFile.cpp
_LINE_RE_V0 = re.compile(r'([0-9a-f]+)\s+([0-9a-f]+)\s+(\d+) ( *)(.*)')
_LINE_RE_V1 = re.compile(
r'\s*[0-9a-f]+\s+([0-9a-f]+)\s+([0-9a-f]+)\s+(\d+) ( *)(.*)')
_LINE_RE = [_LINE_RE_V0, _LINE_RE_V1]
def __init__(self, linker_name):
self._linker_name = linker_name
self._common_symbols = []
self._section_ranges = {}
@staticmethod
def ParseArmAnnotations(tok):
"""Decides whether a Level 3 token is an annotation.
Returns:
A 2-tuple (is_annotation, next_thumb2_mode):
is_annotation: Whether |tok| is an annotation.
next_thumb2_mode: New |thumb2_mode| value, or None to keep the old value.
"""
# Annotations for ARM match '$t', '$d.1', but not '$_21::invoke'.
if tok.startswith('$') and (len(tok) == 2 or
(len(tok) >= 3 and tok[2] == '.')):
if tok.startswith('$t'):
return True, True # Is annotation, enter Thumb2 mode.
if tok.startswith('$a'):
return True, False # Is annotation, enter ARM32 mode.
return True, None # Is annotation, keep old |thumb2_mode| value.
return False, None # Not annotation, keep old |thumb2_mode| value.
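# Examples (tokens as in the comment above):
#   ParseArmAnnotations('$t')           -> (True, True)    # Enter Thumb2 mode.
#   ParseArmAnnotations('$a')           -> (True, False)   # Enter ARM32 mode.
#   ParseArmAnnotations('$d.1')         -> (True, None)    # Keep current mode.
#   ParseArmAnnotations('$_21::invoke') -> (False, None)   # Not an annotation.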
def Tokenize(self, lines):
"""Generator to filter and tokenize linker map lines."""
# Extract e.g., 'lld_v0' -> 0, or 'lld-lto_v1' -> 1.
map_file_version = int(self._linker_name.split('_v')[1])
pattern = MapFileParserLld._LINE_RE[map_file_version]
# A Level 3 symbol can have |size == 0| in some situations (e.g., assembly
# code symbols). To provide better size estimates in this case, the "span"
# of a Level 3 symbol is computed as:
# (A) The |address| difference compared to the next Level 3 symbol.
# (B) If the Level 3 symbol is the last among Level 3 lines nested under a
# Level 2 line: The difference between the Level 3 symbol's |address|
# and the containing Level 2 line's end address.
# To handle (A), |lines| is visited using a one-step lookahead, using
# |sentinel| to handle the last line. To handle (B), |level2_end_address| is
# computed for each Level 2 line.
sentinel = '0 0 0 0 THE_END'
assert pattern.match(sentinel)
level2_end_address = None
thumb2_mode = False
(line, address, size, level, tok) = (None, None, None, None, None)
for next_line in itertools.chain(lines, (sentinel,)):
m = pattern.match(next_line)
if m is None:
continue
next_address = int(m.group(1), 16)
next_size = int(m.group(2), 16)
next_level = (len(m.group(4)) // 8) + 1 # Add 1 to agree with comments.
next_tok = m.group(5)
if next_level == 3:
assert level >= 2, 'Cannot jump from Level 1 to Level 3.'
# Detect annotations. If found, maybe update |thumb2_mode|, then skip.
(is_annotation, next_thumb2_mode) = (
MapFileParserLld.ParseArmAnnotations(next_tok))
if is_annotation:
if next_thumb2_mode:
thumb2_mode = next_thumb2_mode
continue # Skip annotations.
if thumb2_mode:
# Adjust odd address to even. Alignment is not guaranteed for all
# symbols (e.g., data, or x86), so this is judiciously applied.
next_address &= ~1
else:
thumb2_mode = False # Resets on leaving Level 3.
if address is not None:
span = None
if level == 3:
span = next_address if next_level == 3 else level2_end_address
span -= address
elif level == 2:
level2_end_address = address + size
yield (line, address, size, level, span, tok)
line = next_line
address = next_address
size = next_size
level = next_level
tok = next_tok
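# Illustrative sketch of the tuples yielded by Tokenize() (addresses taken
# from the "Newest format" sample in Parse() below): the Level 2 line
#     600 600 14 4 ...:(.text.OUTLINED_FUNCTION_0)
# is yielded roughly as
#     (line, 0x600, 0x14, 2, None, '...:(.text.OUTLINED_FUNCTION_0)')
# and the following Level 3 line for OUTLINED_FUNCTION_0 as
#     (line, 0x600, 0x14, 3, 0x14, 'OUTLINED_FUNCTION_0')
# where the span of 0x14 comes from the enclosing Level 2 end address, since
# it is the last Level 3 line in that group.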
def Parse(self, lines):
"""Parses a linker map file.
Args:
lines: Iterable of lines, the first of which has been consumed to
identify file type.
Returns:
A tuple of (section_ranges, symbols, extras).
"""
# Newest format:
# VMA LMA Size Align Out In Symbol
# 194 194 13 1 .interp
# 194 194 13 1 <internal>:(.interp)
# 1a8 1a8 22d8 4 .ARM.exidx
# 1b0 1b0 8 4 obj/sandbox/syscall.o:(.ARM.exidx)
# 400 400 123400 64 .text
# 600 600 14 4 ...:(.text.OUTLINED_FUNCTION_0)
# 600 600 0 1 $x.3
# 600 600 14 1 OUTLINED_FUNCTION_0
# 123800 123800 20000 256 .rodata
# 123800 123800 4 4 ...:o:(.rodata._ZN3fooE.llvm.1234)
# 123800 123800 4 1 foo (.llvm.1234)
# 123804 123804 4 4 ...:o:(.rodata.bar.llvm.1234)
# 123804 123804 4 1 bar.llvm.1234
# Older format:
# Address Size Align Out In Symbol
# 00000000002002a8 000000000000001c 1 .interp
# 00000000002002a8 000000000000001c 1 <internal>:(.interp)
# ...
# 0000000000201000 0000000000000202 16 .text
# 0000000000201000 000000000000002a 1 /[...]/crt1.o:(.text)
# 0000000000201000 0000000000000000 0 _start
# 000000000020102a 0000000000000000 1 /[...]/crti.o:(.text)
# 0000000000201030 00000000000000bd 16 /[...]/crtbegin.o:(.text)
# 0000000000201030 0000000000000000 0 deregister_tm_clones
# 0000000000201060 0000000000000000 0 register_tm_clones
# 00000000002010a0 0000000000000000 0 __do_global_dtors_aux
# 00000000002010c0 0000000000000000 0 frame_dummy
# 00000000002010ed 0000000000000071 1 a.o:(.text)
# 00000000002010ed 0000000000000071 0 main
syms = []
cur_section = None
cur_section_is_useful = False
promoted_name_count = 0
# |is_partial| indicates that an eligible Level 3 line should be used to
# update |syms[-1].full_name| instead of creating a new symbol.
is_partial = False
# Assembly code can create consecutive Level 3 lines with |size == 0|. These
# lines can represent
# (1) assembly functions (should form symbol), or
# (2) assembly labels (should NOT form symbol).
# It seems (2) correlates with the presence of a leading Level 3 line with
# |size > 0|. This gives rise to the following strategy: Each symbol S from
# a Level 3 line suppresses Level 3 lines with |address| less than
# |next_usable_address := S.address + S.size|.
next_usable_address = 0
# For Thin-LTO, a map from each address to the Thin-LTO cache file. This
# provides hints downstream to identify object_paths for .L.ref.tmp symbols,
# but is not useful in the final output. Therefore it's stored separately,
# instead of being in Symbol.
thin_map = {}
tokenizer = self.Tokenize(lines)
in_partitions = False
in_jump_table = False
jump_tables_count = 0
jump_entries_count = 0
for (line, address, size, level, span, tok) in tokenizer:
# Level 1 data match the "Out" column. They specify sections or
# PROVIDE_HIDDEN lines.
if level == 1:
# Ignore sections that belong to feature library partitions. Seeing a
# partition name is an indicator that we've entered a list of feature
# partitions. After these, a single .part.end section will follow to
# reserve memory at runtime. Seeing the .part.end section also marks the
# end of partition sections in the map file.
if tok.endswith('_partition'):
in_partitions = True
elif tok == '.part.end':
# Note that we want to retain the .part.end section, so it's fine to
# restart processing on this section, rather than the next one.
in_partitions = False
if in_partitions:
# For now, completely ignore feature partitions.
cur_section = None
cur_section_is_useful = False
else:
if not tok.startswith('PROVIDE_HIDDEN'):
self._section_ranges[tok] = (address, size)
cur_section = tok
# E.g., we want to convert "(.text._name)" -> "_name" later.
mangled_start_idx = len(cur_section) + 2
cur_section_is_useful = (
cur_section in models.BSS_SECTIONS
or cur_section in (models.SECTION_RODATA, models.SECTION_TEXT)
or cur_section.startswith(models.SECTION_DATA))
elif cur_section_is_useful:
# Level 2 data match the "In" column. They specify object paths and
# section names within objects, or '<internal>:...'.
if level == 2:
# E.g., 'path.o:(.text._name)' => ['path.o', '(.text._name)'].
cur_obj, paren_value = tok.split(':')
in_jump_table = '.L.cfi.jumptable' in paren_value
if in_jump_table:
# Store each CFI jump table as a Level 2 symbol, whose Level 3
# details are discarded.
jump_tables_count += 1
cur_obj = '' # Replaces 'lto.tmp' to prevent problem later.
mangled_name = '** CFI jump table'
else:
# E.g., '(.text.unlikely._name)' -> '_name'.
mangled_name = paren_value[mangled_start_idx:-1]
cur_flags = _FlagsFromMangledName(mangled_name)
is_partial = True
# As of 2017/11 LLD does not distinguish merged strings from other
# merged data. Feature request is filed under:
# https://bugs.llvm.org/show_bug.cgi?id=35248
if cur_obj == '<internal>':
if cur_section == '.rodata' and mangled_name == '':
# Treat all <internal> sections within .rodata as string
# literals. Some may hold numeric constants or other data, but
# there is currently no way to distinguish them.
mangled_name = '** lld merge strings'
else:
# e.g. <internal>:(.text.thunk)
mangled_name = '** ' + mangled_name
is_partial = False
cur_obj = None
elif cur_obj == 'lto.tmp' or 'thinlto-cache' in cur_obj:
thin_map[address] = os.path.basename(cur_obj)
cur_obj = None
# Create a symbol here since there may be no ensuing Level 3 lines.
# But if there are, then the symbol can be modified later as syms[-1].
sym = models.Symbol(cur_section, size, address=address,
full_name=mangled_name, object_path=cur_obj,
flags=cur_flags)
syms.append(sym)
# Level 3 |address| is nested under Level 2, don't add |size|.
next_usable_address = address
# Level 3 data match the "Symbol" column. They specify symbol names or
# special names such as '.L_MergedGlobals'. Annotations such as '$d',
# '$t.42' also appear at Level 3, but they are consumed by |tokenizer|,
# so they don't appear here.
elif level == 3:
# Handle .L.cfi.jumptable.
if in_jump_table:
# Level 3 entries in CFI jump tables are thunks with mangled names.
# Extracting them as symbols is not worthwhile; we only store the
# Level 2 symbol, and print the count for verbose output. For
# counting, '__typeid_' entries are excluded since they're likely
# just annotations.
if not tok.startswith('__typeid_'):
jump_entries_count += 1
continue
# Ignore anything with '.L_MergedGlobals' prefix. This seems to only
# happen for ARM (32-bit) builds.
if tok.startswith('.L_MergedGlobals'):
continue
# Use |span| to decide whether to use a Level 3 line for Symbols. This
# is useful for two purposes:
# * This is a better indicator than |size|, which can be 0 for
# assembly functions.
# * If multiple Level 3 lines have the same starting address, this
# causes all but the last line to have |span == 0|. This dedups lines
# with identical symbol names (why do they exist?). Note that this
# also skips legitimate aliases, but that's desired because nm.py
# (downstream) assumes no aliases already exist.
if span > 0:
stripped_tok = demangle.StripLlvmPromotedGlobalNames(tok)
if len(tok) != len(stripped_tok):
promoted_name_count += 1
tok = stripped_tok
tok = _NormalizeName(tok)
# Handle special case where a partial symbol consumes bytes before
# the first Level 3 symbol.
if is_partial and syms[-1].address < address:
# Truncate the partial symbol and leave it without |full_name|.
# The data from the current line will form a new symbol.
syms[-1].size = address - syms[-1].address
next_usable_address = address
is_partial = False
if is_partial:
syms[-1].full_name = tok
syms[-1].size = size if size > 0 else min(syms[-1].size, span)
next_usable_address = address + syms[-1].size
is_partial = False
elif address >= next_usable_address:
if tok.startswith('__typeid_'):
assert size == 1
if tok.endswith('_byte_array'):
# CFI byte array table: |size| is inaccurate, so use |span|.
size_to_use = span
else:
# Likely '_global_addr' or '_unique_member'. These should be:
# * Skipped since they're in CFI tables.
# * Suppressed (via |next_usable_address|) by another Level 3
# symbol.
# Anything that makes it here would be an anomaly worthy of
# investigation, so print warnings.
logging.warn('Unrecognized __typeid_ symbol at %08X', address)
continue
else:
# Prefer |size|, and only fall back to |span| if |size == 0|.
size_to_use = size if size > 0 else span
sym = models.Symbol(cur_section, size_to_use, address=address,
full_name=tok, flags=cur_flags)
syms.append(sym)
# Suppress symbols with overlapping |address|. This eliminates
# labels from assembly sources.
next_usable_address = address + size_to_use
if cur_obj is not None:
syms[-1].object_path = cur_obj
else:
logging.error('Problem line: %r', line)
if promoted_name_count:
logging.info('Found %d promoted global names', promoted_name_count)
if jump_tables_count:
logging.info('Found %d CFI jump tables with %d total entries',
jump_tables_count, jump_entries_count)
return self._section_ranges, syms, {'thin_map': thin_map}
def _DetectLto(lines):
"""Scans LLD linker map file and returns whether LTO was used."""
# It's assumed that the first line in |lines| was consumed to determine that
# LLD was used. Look for a 'thinlto-cache' prefix within an "indicator
# section" as an indicator that LTO was used.
found_indicator_section = False
# Potential names of "main section". Only one gets used.
indicator_section_set = set(['.rodata', '.ARM.exidx'])
start_pos = -1
for line in lines:
# Shortcut to avoid regex: The first line seen (second line in file) should
# start a section, and start with '.', e.g.:
# 194 194 13 1 .interp
# Assign |start_pos| as position of '.', and trim everything before!
if start_pos < 0:
start_pos = line.index('.')
if len(line) < start_pos:
continue
line = line[start_pos:]
tok = line.lstrip() # Allow whitespace at right.
indent_size = len(line) - len(tok)
if indent_size == 0: # Section change.
if found_indicator_section: # Exit if just visited "main section".
break
if tok.strip() in indicator_section_set:
found_indicator_section = True
elif indent_size == 8:
if found_indicator_section:
if tok.startswith('thinlto-cache'):
return True
return False
def DetectLinkerNameFromMapFile(lines):
"""Heuristic linker detection from partial scan of the linker map.
Args:
lines: Iterable of lines from the linker map.
Returns:
A coded linker name.
"""
first_line = next(lines)
if first_line.startswith('Address'):
return 'lld-lto_v0' if _DetectLto(lines) else 'lld_v0'
if first_line.lstrip().startswith('VMA'):
return 'lld-lto_v1' if _DetectLto(lines) else 'lld_v1'
if first_line.startswith('Archive member'):
return 'gold'
raise Exception('Invalid map file: ' + first_line)
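# Summary of the heuristic above (first line of the map file -> linker name):
#   'Address ...'         -> 'lld_v0'  (or 'lld-lto_v0' when LTO is detected)
#   '    VMA     LMA ...' -> 'lld_v1'  (or 'lld-lto_v1' when LTO is detected)
#   'Archive member ...'  -> 'gold'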
class MapFileParser(object):
"""Parses a linker map file generated from a specified linker."""
def Parse(self, linker_name, lines):
"""Parses a linker map file.
Args:
linker_name: Coded linker name to specify a linker.
lines: Iterable of lines from the linker map.
Returns:
A tuple of (section_ranges, symbols, extras).
"""
next(lines) # Consume the first line of headers.
if linker_name.startswith('lld'):
inner_parser = MapFileParserLld(linker_name)
elif linker_name == 'gold':
inner_parser = MapFileParserGold()
else:
raise Exception('.map file is from an unsupported linker.')
section_ranges, syms, extras = inner_parser.Parse(lines)
for sym in syms:
if sym.object_path and not sym.object_path.endswith(')'):
# Don't want '' to become '.'.
# Thin archives' paths will get fixed in |ar.CreateThinObjectPath|.
sym.object_path = os.path.normpath(sym.object_path)
return (section_ranges, syms, extras)
def DeduceObjectPathsFromThinMap(raw_symbols, extras):
"""Uses Thin-LTO object paths to find object_paths of symbols. """
thin_map = extras.get('thin_map', None) # |address| -> |thin_obj|
if not thin_map: # None or empty.
logging.info('No thin-object-path found: Skipping object path deduction.')
return
# Build map of |thin_obj| -> |object_paths|.
thin_obj_to_object_paths = collections.defaultdict(set)
logging.info('Building map of thin-object-path -> object path.')
for symbol in raw_symbols:
if symbol.object_path:
thin_obj = thin_map.get(symbol.address, None)
if thin_obj:
thin_obj_to_object_paths[thin_obj].add(symbol.object_path)
# For each symbol without |object_path|, translate |address| -> |thin_obj| ->
# |object_paths|. If unique, then assign to symbol. Stats are kept, keyed on
# |len(object_paths)|.
# Example symbols this happens with: ".L.ref.tmp", "** outlined function".
logging.info('Assigning object paths using Thin-LTO paths.')
ref_tmp_popu = [0] * 3
ref_tmp_pss = [0] * 3
for symbol in raw_symbols:
if not symbol.object_path:
thin_obj = thin_map.get(symbol.address)
# Ignore non-native symbols.
if thin_obj:
count = 0
object_paths = thin_obj_to_object_paths.get(thin_obj)
if object_paths is not None:
count = min(len(object_paths), 2) # 2+ maps to 2.
# We could create path aliases when count > 1, but it wouldn't
# necessarily be correct. That occurs when *another* symbol from the
# same .o file contains a path alias, but not necessarily this symbol.
if count == 1:
symbol.object_path = next(iter(object_paths))
ref_tmp_popu[count] += 1
ref_tmp_pss[count] += symbol.pss
# As of Mar 2019:
# No match: 2 symbols with total PSS = 20
# Assigned (1 object path): 1098 symbols with total PSS = 55454
# Ambiguous (2+ object paths): 2315 symbols with total PSS = 41941
logging.info('Object path deduction results for pathless symbols:')
logging.info(' No match: %d symbols with total PSS = %d', ref_tmp_popu[0],
ref_tmp_pss[0])
logging.info(' Assigned (1 object path): %d symbols with total PSS = %d',
ref_tmp_popu[1], ref_tmp_pss[1])
logging.info(' Ambiguous (2+ object paths): %d symbols with total PSS = %d',
ref_tmp_popu[2], ref_tmp_pss[2])
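# Minimal sketch of the expected inputs (addresses and file names below are
# hypothetical): |extras| is the third element returned by
# MapFileParserLld.Parse(), e.g. {'thin_map': {0x2d1e668: 'chunk.42.o'}}, and
# |raw_symbols| is a list of models.Symbol. A pathless symbol whose address
# maps (via thin_map) to a thin object associated with exactly one object
# path gets that path assigned; unmatched and ambiguous (2+) cases are only
# counted for the stats logged above.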
def main():
parser = argparse.ArgumentParser()
parser.add_argument('linker_file', type=os.path.realpath)
parser.add_argument(
'-v',
'--verbose',
default=0,
action='count',
help='Verbose level (multiple times for more)')
parser.add_argument('--dump', action='store_true')
args = parser.parse_args()
logging.basicConfig(
level=logging.WARNING - args.verbose * 10,
format='%(levelname).1s %(relativeCreated)6d %(message)s')
with open(args.linker_file, 'r') as map_file:
linker_name = DetectLinkerNameFromMapFile(map_file)
print('Linker type: %s' % linker_name)
with open(args.linker_file, 'r') as map_file:
section_ranges, syms, extras = MapFileParser().Parse(linker_name, map_file)
if args.dump:
print(section_ranges)
for sym in syms:
print(sym)
else:
# Enter interactive shell.
readline.parse_and_bind('tab: complete')
variables = {
'section_ranges': section_ranges,
'syms': syms,
'extras': extras
}
banner_lines = [
'*' * 80,
'Variables:',
' section_ranges: Map from section name to (address, size).',
' syms: Raw symbols parsed from the linker map file.',
' extras: Format-specific extra data.',
'*' * 80,
]
code.InteractiveConsole(variables).interact('\n'.join(banner_lines))
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Central list of tests to run (as appropriate for a given config). Add tests
to run by modifying this file.
Note that this file is both imported (by mojob.py) and run directly (via a
recipe)."""
import argparse
import json
import os
import sys
from mopy.config import Config
from mopy.paths import Paths
def GetTestList(config, verbose_count=0):
"""Gets the list of tests to run for the given config. The test list (which is
returned) is just a list of dictionaries, each dictionary having two required
fields:
{
"name": "Short name",
"command": ["python", "test_runner.py", "--some", "args"]
}
"""
types_to_run = set(config.test_types)
# See above for a description of the test list.
test_list = []
paths = Paths(config)
build_dir = paths.SrcRelPath(paths.build_dir)
target_os = config.target_os
verbose_flags = verbose_count * ["--verbose"]
# Utility functions ----------------------------------------------------------
# Call this to determine if a test matching any of this_tests_types should
# run: e.g., ShouldRunTest(Config.TEST_TYPE_DEFAULT, "sky") returns true if
# the test list being requested specifies the default set or the "sky" set.
def ShouldRunTest(*this_tests_types):
return not types_to_run.isdisjoint(this_tests_types)
# Call this to add the given command to the test list.
def AddEntry(name, command):
if config.sanitizer == Config.SANITIZER_ASAN:
command = (["python", os.path.join("mojo", "tools",
"run_command_through_symbolizer.py")] +
command)
test_list.append({"name": name, "command": command})
# Call this to add the given command to the test list. If appropriate, the
# command will be run under xvfb.
def AddXvfbEntry(name, command):
real_command = ["python"]
if config.target_os == Config.OS_LINUX:
real_command += ["./testing/xvfb.py", paths.SrcRelPath(paths.build_dir)]
real_command += command
AddEntry(name, real_command)
# ----------------------------------------------------------------------------
# TODO(vtl): Currently, we only know how to run tests for Android, iOS,
# Linux, or Windows.
if target_os not in (Config.OS_ANDROID, Config.OS_LINUX, Config.OS_WINDOWS,
Config.OS_IOS):
return test_list
# Tests run by default -------------------------------------------------------
# C++ unit tests:
if ShouldRunTest(Config.TEST_TYPE_DEFAULT, Config.TEST_TYPE_UNIT):
AddXvfbEntry("Unit tests",
[os.path.join("mojo", "tools", "test_runner.py"),
os.path.join("mojo", "tools", "data", "unittests"),
build_dir] + verbose_flags)
# NaCl tests (Linux only):
if (target_os == Config.OS_LINUX and
config.sanitizer != Config.SANITIZER_ASAN):
AddEntry("NaCl tests",
[os.path.join(build_dir, "monacl_shell"),
os.path.join(build_dir, "irt_" + config.target_cpu,
"irt_mojo.nexe"),
os.path.join(build_dir, "clang_newlib_" + config.target_cpu,
"monacl_test.nexe")])
# C++ app tests:
if ShouldRunTest(Config.TEST_TYPE_DEFAULT, "app"):
AddXvfbEntry("App tests",
[os.path.join("mojo", "tools", "apptest_runner.py"),
os.path.join("mojo", "tools", "data", "apptests"),
build_dir] + verbose_flags)
# NaCl app tests (Linux only):
if (target_os == Config.OS_LINUX and
config.sanitizer != Config.SANITIZER_ASAN):
AddXvfbEntry("NaCl app tests",
[os.path.join("mojo", "tools", "apptest_runner.py"),
os.path.join("mojo", "tools", "data", "nacl_apptests"),
build_dir] + verbose_flags)
# Go unit tests (Linux-only):
if (target_os == Config.OS_LINUX and
config.sanitizer != Config.SANITIZER_ASAN and
ShouldRunTest(Config.TEST_TYPE_DEFAULT, Config.TEST_TYPE_UNIT, "go")):
AddEntry("Go unit tests",
[os.path.join(build_dir, "obj", "mojo", "go", "system_test")])
# Python unit tests:
if ShouldRunTest(Config.TEST_TYPE_DEFAULT, Config.TEST_TYPE_UNIT, "python"):
AddEntry("Python unit tests",
["python", os.path.join("mojo", "tools",
"run_mojo_python_tests.py")])
# Python bindings tests (Linux-only):
# See http://crbug.com/438781 for details on asan exclusion.
if (target_os == Config.OS_LINUX and
ShouldRunTest(Config.TEST_TYPE_DEFAULT, Config.TEST_TYPE_UNIT,
"python") and
config.sanitizer != Config.SANITIZER_ASAN):
AddEntry("Python bindings tests",
["python",
os.path.join("mojo", "tools",
"run_mojo_python_bindings_tests.py"),
"--build-dir=" + build_dir])
# Sky tests (Linux-only):
# TODO(abarth): Re-enable in ASAN once the DartVM works in ASAN.
# See https://code.google.com/p/dart/issues/detail?id=22122
if (target_os == Config.OS_LINUX and
ShouldRunTest(Config.TEST_TYPE_DEFAULT, "sky") and
config.sanitizer != Config.SANITIZER_ASAN):
sky_command = ["python",
"sky/tools/test_sky",
"-t", os.path.basename(build_dir),
"--no-new-test-results", "--no-show-results", "--verbose"]
if config.values.get("builder_name"):
sky_command += ["--builder-name", config.values["builder_name"]]
if config.values.get("build_number"):
sky_command += ["--build-number", config.values["build_number"]]
if config.values.get("master_name"):
sky_command += ["--master-name", config.values["master_name"]]
if config.values.get("test_results_server"):
sky_command += ["--test-results-server",
config.values["test_results_server"]]
AddXvfbEntry("Sky tests", sky_command)
# Observatory tests (Linux-only):
if target_os == Config.OS_LINUX:
AddEntry("Dart Observatory tests",
["python",
os.path.join("mojo", "dart", "observatory_tester", "runner.py"),
"--build-dir=" + build_dir,
"--dart-exe=third_party/dart-sdk/dart-sdk/bin/dart"])
AddEntry("Dart HTTP Load test",
["python",
os.path.join("mojo", "dart", "http_load_test", "runner.py"),
"--build-dir=" + build_dir,
"--dart-exe=third_party/dart-sdk/dart-sdk/bin/dart"])
# mojo tools unit tests:
if ShouldRunTest(Config.TEST_TYPE_DEFAULT, Config.TEST_TYPE_UNIT, "tools"):
AddEntry("Mojo tools unit tests",
["python", os.path.join("mojo", "tools", "testing",
"mojom_fetcher",
"mojom_fetcher_tests.py")])
# Dart mojom package generate.dart script tests:
if target_os == Config.OS_LINUX:
AddEntry("Dart mojom package generate tests",
[os.path.join("third_party", "dart-sdk", "dart-sdk", "bin", "dart"),
"--checked",
"-p", os.path.join("mojo", "dart", "mojom", "packages"),
os.path.join("mojo", "dart", "mojom", "test", "generate_test.dart")])
# Perf tests -----------------------------------------------------------------
if target_os == Config.OS_LINUX and ShouldRunTest(Config.TEST_TYPE_PERF):
perf_id = "linux_%s" % ("debug" if config.is_debug else "release")
test_names = ["mojo_public_system_perftests",
"mojo_public_bindings_perftests"]
for test_name in test_names:
command = ["python",
os.path.join("mojo", "tools", "perf_test_runner.py"),
"--perf-id", perf_id,
"--test-name", test_name,
"--perf-data-path",
os.path.join(build_dir, test_name + "_perf.log"),
"--production-dashboard"]
if config.values.get("builder_name"):
command += ["--builder-name", config.values["builder_name"]]
if config.values.get("build_number"):
command += ["--build-number", config.values["build_number"]]
if config.values.get("master_name"):
command += ["--master-name", config.values["master_name"]]
command += [os.path.join(build_dir, test_name)]
AddEntry(test_name, command)
# Integration tests ----------------------------------------------------------
if target_os == Config.OS_ANDROID and ShouldRunTest(
Config.TEST_TYPE_DEFAULT, Config.TEST_TYPE_INTEGRATION):
AddEntry("Integration test (MojoTest)",
["python",
os.path.join("build", "android", "test_runner.py"),
"instrumentation",
"--test-apk=MojoTest",
"--output-directory=%s" % build_dir,
"--test_data=bindings:mojo/public/interfaces/bindings/tests/data"]
+ verbose_flags)
return test_list
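# Illustrative example of a resulting entry for a Linux, non-ASan config (the
# exact build_dir comes from Paths(config); "out/Debug" is assumed here only
# for illustration):
#   {
#     "name": "Unit tests",
#     "command": ["python", "./testing/xvfb.py", "out/Debug",
#                 "mojo/tools/test_runner.py", "mojo/tools/data/unittests",
#                 "out/Debug"]
#   }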
def main():
parser = argparse.ArgumentParser(description="Gets tests to execute.")
parser.add_argument("config_file", metavar="config.json",
type=argparse.FileType("rb"),
help="Input JSON file with test configuration.")
parser.add_argument("test_list_file", metavar="test_list.json", nargs="?",
type=argparse.FileType("wb"), default=sys.stdout,
help="Output JSON file with test list.")
args = parser.parse_args()
config = Config(**json.load(args.config_file))
test_list = GetTestList(config)
json.dump(test_list, args.test_list_file, indent=2)
args.test_list_file.write("\n")
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
"""
====================
Scheme Editor Widget
====================
"""
import sys
import logging
import itertools
import unicodedata
import copy
from operator import attrgetter
from urllib.parse import urlencode
from PyQt4.QtGui import (
QWidget, QVBoxLayout, QInputDialog, QMenu, QAction, QActionGroup,
QKeySequence, QUndoStack, QUndoCommand, QGraphicsItem, QGraphicsObject,
QGraphicsTextItem, QCursor, QFont, QPainter, QPixmap, QColor,
QIcon, QWhatsThisClickedEvent, QBrush
)
from PyQt4.QtCore import (
Qt, QObject, QEvent, QSignalMapper, QRectF, QCoreApplication
)
from PyQt4.QtCore import pyqtProperty as Property, pyqtSignal as Signal
from ..registry.qt import whats_this_helper
from ..gui.quickhelp import QuickHelpTipEvent
from ..gui.utils import message_information, disabled
from ..scheme import (
scheme, signalmanager, SchemeNode, SchemeLink, BaseSchemeAnnotation
)
from ..scheme import widgetsscheme
from ..canvas.scene import CanvasScene
from ..canvas.view import CanvasView
from ..canvas import items
from . import interactions
from . import commands
from . import quickmenu
log = logging.getLogger(__name__)
# TODO: Should this be moved to CanvasScene?
class GraphicsSceneFocusEventListener(QGraphicsObject):
itemFocusedIn = Signal(object)
itemFocusedOut = Signal(object)
def __init__(self, parent=None):
QGraphicsObject.__init__(self, parent)
self.setFlag(QGraphicsItem.ItemHasNoContents)
def sceneEventFilter(self, obj, event):
if event.type() == QEvent.FocusIn and \
obj.flags() & QGraphicsItem.ItemIsFocusable:
obj.focusInEvent(event)
if obj.hasFocus():
self.itemFocusedIn.emit(obj)
return True
elif event.type() == QEvent.FocusOut:
obj.focusOutEvent(event)
if not obj.hasFocus():
self.itemFocusedOut.emit(obj)
return True
return QGraphicsObject.sceneEventFilter(self, obj, event)
def boundingRect(self):
return QRectF()
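# GraphicsSceneFocusEventListener is installed on the canvas scene via
# scene.addItem() in SchemeEditWidget.__setupScene() below; its
# itemFocusedIn/itemFocusedOut signals let the editor track which item
# currently has keyboard focus.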
class SchemeEditWidget(QWidget):
"""
A widget for editing a :class:`~.scheme.Scheme` instance.
"""
#: Undo command has become available/unavailable.
undoAvailable = Signal(bool)
#: Redo command has become available/unavailable.
redoAvailable = Signal(bool)
#: Document modified state has changed.
modificationChanged = Signal(bool)
#: Undo command was added to the undo stack.
undoCommandAdded = Signal()
#: Item selection has changed.
selectionChanged = Signal()
#: Document title has changed.
titleChanged = Signal(str)
#: Document path has changed.
pathChanged = Signal(str)
# Quick Menu triggers
(NoTriggers,
RightClicked,
DoubleClicked,
SpaceKey,
AnyKey) = [0, 1, 2, 4, 8]
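# Triggers can be combined bitwise, e.g. the default set in __init__ below is
# SpaceKey | DoubleClicked.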
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.__modified = False
self.__registry = None
self.__scheme = None
self.__path = ""
self.__quickMenuTriggers = SchemeEditWidget.SpaceKey | \
SchemeEditWidget.DoubleClicked
self.__emptyClickButtons = 0
self.__channelNamesVisible = True
self.__nodeAnimationEnabled = True
self.__possibleSelectionHandler = None
self.__possibleMouseItemsMove = False
self.__itemsMoving = {}
self.__contextMenuTarget = None
self.__quickMenu = None
self.__quickTip = ""
self.__undoStack = QUndoStack(self)
self.__undoStack.cleanChanged[bool].connect(self.__onCleanChanged)
# scheme node properties when set to a clean state
self.__cleanProperties = []
self.__editFinishedMapper = QSignalMapper(self)
self.__editFinishedMapper.mapped[QObject].connect(
self.__onEditingFinished
)
self.__annotationGeomChanged = QSignalMapper(self)
self.__setupActions()
self.__setupUi()
self.__editMenu = QMenu(self.tr("&Edit"), self)
self.__editMenu.addAction(self.__undoAction)
self.__editMenu.addAction(self.__redoAction)
self.__editMenu.addSeparator()
self.__editMenu.addAction(self.__duplicateSelectedAction)
self.__editMenu.addAction(self.__selectAllAction)
self.__widgetMenu = QMenu(self.tr("&Widget"), self)
self.__widgetMenu.addAction(self.__openSelectedAction)
self.__widgetMenu.addSeparator()
self.__widgetMenu.addAction(self.__renameAction)
self.__widgetMenu.addAction(self.__removeSelectedAction)
self.__widgetMenu.addSeparator()
self.__widgetMenu.addAction(self.__helpAction)
self.__linkMenu = QMenu(self.tr("Link"), self)
self.__linkMenu.addAction(self.__linkEnableAction)
self.__linkMenu.addSeparator()
self.__linkMenu.addAction(self.__linkRemoveAction)
self.__linkMenu.addAction(self.__linkResetAction)
def __setupActions(self):
self.__zoomAction = \
QAction(self.tr("Zoom"), self,
objectName="zoom-action",
checkable=True,
shortcut=QKeySequence.ZoomIn,
toolTip=self.tr("Zoom in the workflow."),
toggled=self.toggleZoom,
)
self.__cleanUpAction = \
QAction(self.tr("Clean Up"), self,
objectName="cleanup-action",
toolTip=self.tr("Align widgets to a grid."),
triggered=self.alignToGrid,
)
self.__newTextAnnotationAction = \
QAction(self.tr("Text"), self,
objectName="new-text-action",
toolTip=self.tr("Add a text annotation to the workflow."),
checkable=True,
toggled=self.__toggleNewTextAnnotation,
)
# Create a font size menu for the new annotation action.
self.__fontMenu = QMenu("Font Size", self)
self.__fontActionGroup = group = \
QActionGroup(self, exclusive=True,
triggered=self.__onFontSizeTriggered)
def font(size):
f = QFont(self.font())
f.setPixelSize(size)
return f
for size in [12, 14, 16, 18, 20, 22, 24]:
action = QAction("%ipx" % size, group,
checkable=True,
font=font(size))
self.__fontMenu.addAction(action)
group.actions()[2].setChecked(True)
self.__newTextAnnotationAction.setMenu(self.__fontMenu)
self.__newArrowAnnotationAction = \
QAction(self.tr("Arrow"), self,
objectName="new-arrow-action",
toolTip=self.tr("Add an arrow annotation to the workflow."),
checkable=True,
toggled=self.__toggleNewArrowAnnotation,
)
# Create a color menu for the arrow annotation action
self.__arrowColorMenu = QMenu("Arrow Color",)
self.__arrowColorActionGroup = group = \
QActionGroup(self, exclusive=True,
triggered=self.__onArrowColorTriggered)
def color_icon(color):
icon = QIcon()
for size in [16, 24, 32]:
pixmap = QPixmap(size, size)
pixmap.fill(QColor(0, 0, 0, 0))
p = QPainter(pixmap)
p.setRenderHint(QPainter.Antialiasing)
p.setBrush(color)
p.setPen(Qt.NoPen)
p.drawEllipse(1, 1, size - 2, size - 2)
p.end()
icon.addPixmap(pixmap)
return icon
for color in ["#000", "#C1272D", "#662D91", "#1F9CDF", "#39B54A"]:
icon = color_icon(QColor(color))
action = QAction(group, icon=icon, checkable=True,
iconVisibleInMenu=True)
action.setData(color)
self.__arrowColorMenu.addAction(action)
group.actions()[1].setChecked(True)
self.__newArrowAnnotationAction.setMenu(self.__arrowColorMenu)
self.__undoAction = self.__undoStack.createUndoAction(self)
self.__undoAction.setShortcut(QKeySequence.Undo)
self.__undoAction.setObjectName("undo-action")
self.__redoAction = self.__undoStack.createRedoAction(self)
self.__redoAction.setShortcut(QKeySequence.Redo)
self.__redoAction.setObjectName("redo-action")
self.__selectAllAction = \
QAction(self.tr("Select All"), self,
objectName="select-all-action",
toolTip=self.tr("Select all items."),
triggered=self.selectAll,
shortcut=QKeySequence.SelectAll
)
self.__openSelectedAction = \
QAction(self.tr("Open"), self,
objectName="open-action",
toolTip=self.tr("Open selected widget"),
triggered=self.openSelected,
enabled=False)
self.__removeSelectedAction = \
QAction(self.tr("Remove"), self,
objectName="remove-selected",
toolTip=self.tr("Remove selected items"),
triggered=self.removeSelected,
enabled=False
)
shortcuts = [Qt.Key_Delete,
Qt.ControlModifier + Qt.Key_Backspace]
if sys.platform == "darwin":
# Command+Backspace should be the first
# (i.e. the visible) shortcut in the menu.
shortcuts.reverse()
self.__removeSelectedAction.setShortcuts(shortcuts)
self.__renameAction = \
QAction(self.tr("Rename"), self,
objectName="rename-action",
toolTip=self.tr("Rename selected widget"),
triggered=self.__onRenameAction,
shortcut=QKeySequence(Qt.Key_F2),
enabled=False)
self.__helpAction = \
QAction(self.tr("Help"), self,
objectName="help-action",
toolTip=self.tr("Show widget help"),
triggered=self.__onHelpAction,
shortcut=QKeySequence("F1"),
enabled=False,
)
self.__linkEnableAction = \
QAction(self.tr("Enabled"), self,
objectName="link-enable-action",
triggered=self.__toggleLinkEnabled,
checkable=True,
)
self.__linkRemoveAction = \
QAction(self.tr("Remove"), self,
objectName="link-remove-action",
triggered=self.__linkRemove,
toolTip=self.tr("Remove link."),
)
self.__linkResetAction = \
QAction(self.tr("Reset Signals"), self,
objectName="link-reset-action",
triggered=self.__linkReset,
)
self.__duplicateSelectedAction = \
QAction(self.tr("Duplicate Selected"), self,
objectName="duplicate-action",
enabled=False,
shortcut=QKeySequence(Qt.ControlModifier + Qt.Key_D),
triggered=self.__duplicateSelected,
)
self.addActions([self.__newTextAnnotationAction,
self.__newArrowAnnotationAction,
self.__linkEnableAction,
self.__linkRemoveAction,
self.__linkResetAction,
self.__duplicateSelectedAction])
# Actions which should be disabled while a multistep
# interaction is in progress.
self.__disruptiveActions = \
[self.__undoAction,
self.__redoAction,
self.__removeSelectedAction,
self.__selectAllAction,
self.__duplicateSelectedAction]
def __setupUi(self):
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
scene = CanvasScene()
scene.setItemIndexMethod(CanvasScene.NoIndex)
self.__setupScene(scene)
view = CanvasView(scene)
view.setFrameStyle(CanvasView.NoFrame)
view.setRenderHint(QPainter.Antialiasing)
view.setContextMenuPolicy(Qt.CustomContextMenu)
view.customContextMenuRequested.connect(
self.__onCustomContextMenuRequested
)
self.__view = view
self.__scene = scene
layout.addWidget(view)
self.setLayout(layout)
def __setupScene(self, scene):
"""
Set up a :class:`CanvasScene` instance for use by the editor.
.. note:: If an existing scene is in use it must be torn down using
__teardownScene
"""
scene.set_channel_names_visible(self.__channelNamesVisible)
scene.set_node_animation_enabled(
self.__nodeAnimationEnabled
)
scene.setFont(self.font())
scene.installEventFilter(self)
scene.set_registry(self.__registry)
# Focus listener
self.__focusListener = GraphicsSceneFocusEventListener()
self.__focusListener.itemFocusedIn.connect(
self.__onItemFocusedIn
)
self.__focusListener.itemFocusedOut.connect(
self.__onItemFocusedOut
)
scene.addItem(self.__focusListener)
scene.selectionChanged.connect(
self.__onSelectionChanged
)
scene.node_item_activated.connect(
self.__onNodeActivate
)
scene.annotation_added.connect(
self.__onAnnotationAdded
)
scene.annotation_removed.connect(
self.__onAnnotationRemoved
)
self.__annotationGeomChanged = QSignalMapper(self)
def __teardownScene(self, scene):
"""
Tear down an instance of :class:`CanvasScene` that was used by the
editor.
"""
# Clear the current item selection in the scene so edit action
# states are updated accordingly.
scene.clearSelection()
# Clear focus from any item.
scene.setFocusItem(None)
# Clear the annotation mapper
self.__annotationGeomChanged.deleteLater()
self.__annotationGeomChanged = None
self.__focusListener.itemFocusedIn.disconnect(
self.__onItemFocusedIn
)
self.__focusListener.itemFocusedOut.disconnect(
self.__onItemFocusedOut
)
scene.selectionChanged.disconnect(
self.__onSelectionChanged
)
scene.removeEventFilter(self)
# Clear all items from the scene
scene.blockSignals(True)
scene.clear_scene()
def toolbarActions(self):
"""
Return a list of actions that can be inserted into a toolbar.
At the moment these are:
- 'Zoom' action
- 'Clean up' action (align to grid)
- 'New text annotation' action (with a size menu)
- 'New arrow annotation' action (with a color menu)
"""
return [self.__zoomAction,
self.__cleanUpAction,
self.__newTextAnnotationAction,
self.__newArrowAnnotationAction]
def menuBarActions(self):
"""
Return a list of actions that can be inserted into a `QMenuBar`.
"""
return [self.__editMenu.menuAction(), self.__widgetMenu.menuAction()]
def isModified(self):
"""
Is the document in a modified state.
"""
return self.__modified or not self.__undoStack.isClean()
def setModified(self, modified):
"""
Set the document modified state.
"""
if self.__modified != modified:
self.__modified = modified
if not modified:
self.__cleanProperties = node_properties(self.__scheme)
self.__undoStack.setClean()
else:
self.__cleanProperties = []
modified = Property(bool, fget=isModified, fset=setModified)
def isModifiedStrict(self):
"""
Is the document modified.
Run a strict check against all node properties as they were
at the time when the document was last set to a clean state
(via `setModified(False)`).
"""
propertiesChanged = self.__cleanProperties != \
node_properties(self.__scheme)
log.debug("Modified strict check (modified flag: %s, "
"undo stack clean: %s, properties: %s)",
self.__modified,
self.__undoStack.isClean(),
propertiesChanged)
return self.isModified() or propertiesChanged
def setQuickMenuTriggers(self, triggers):
"""
Set quick menu trigger flags.
Flags can be a bitwise `or` of:
- `SchemeEditWidget.NoTriggers`
- `SchemeEditWidget.RightClicked`
- `SchemeEditWidget.DoubleClicked`
- `SchemeEditWidget.SpaceKey`
- `SchemeEditWidget.AnyKey`
"""
if self.__quickMenuTriggers != triggers:
self.__quickMenuTriggers = triggers
def quickMenuTriggers(self):
"""
Return quick menu trigger flags.
"""
return self.__quickMenuTriggers
def setChannelNamesVisible(self, visible):
"""
Set channel names visibility state. When enabled, the links
in the view will have source/sink channel names displayed over
them.
"""
if self.__channelNamesVisible != visible:
self.__channelNamesVisible = visible
self.__scene.set_channel_names_visible(visible)
def channelNamesVisible(self):
"""
Return the channel name visibility state.
"""
return self.__channelNamesVisible
def setNodeAnimationEnabled(self, enabled):
"""
Set the node item animation enabled state.
"""
if self.__nodeAnimationEnabled != enabled:
self.__nodeAnimationEnabled = enabled
self.__scene.set_node_animation_enabled(enabled)
def nodeAnimationEnabled(self):
"""
Return the node item animation enabled state.
"""
return self.__nodeAnimationEnabled
def undoStack(self):
"""
Return the undo stack.
"""
return self.__undoStack
def setPath(self, path):
"""
Set the path associated with the current scheme.
.. note:: Calling `setScheme` will invalidate the path (i.e. set it
to an empty string)
"""
if self.__path != path:
self.__path = str(path)
self.pathChanged.emit(self.__path)
def path(self):
"""
Return the path associated with the scheme
"""
return self.__path
def setScheme(self, scheme):
"""
Set the :class:`~.scheme.Scheme` instance to display/edit.
"""
if self.__scheme is not scheme:
if self.__scheme:
self.__scheme.title_changed.disconnect(self.titleChanged)
self.__scheme.removeEventFilter(self)
sm = self.__scheme.findChild(signalmanager.SignalManager)
if sm:
sm.stateChanged.disconnect(
self.__signalManagerStateChanged)
self.__scheme = scheme
self.setPath("")
if self.__scheme:
self.__scheme.title_changed.connect(self.titleChanged)
self.titleChanged.emit(scheme.title)
self.__cleanProperties = node_properties(scheme)
sm = scheme.findChild(signalmanager.SignalManager)
if sm:
sm.stateChanged.connect(self.__signalManagerStateChanged)
else:
self.__cleanProperties = []
self.__teardownScene(self.__scene)
self.__scene.deleteLater()
self.__undoStack.clear()
self.__scene = CanvasScene()
self.__scene.setItemIndexMethod(CanvasScene.NoIndex)
self.__setupScene(self.__scene)
self.__view.setScene(self.__scene)
self.__scene.set_scheme(scheme)
if self.__scheme:
self.__scheme.installEventFilter(self)
def scheme(self):
"""
Return the :class:`~.scheme.Scheme` edited by the widget.
"""
return self.__scheme
def scene(self):
"""
Return the :class:`QGraphicsScene` instance used to display the
current scheme.
"""
return self.__scene
def view(self):
"""
Return the :class:`QGraphicsView` instance used to display the
current scene.
"""
return self.__view
def setRegistry(self, registry):
# Is this method necessary?
# It should be removed when the scene (items) is fixed
# so all information regarding the visual appearance is
# included in the node/widget description.
self.__registry = registry
if self.__scene:
self.__scene.set_registry(registry)
self.__quickMenu = None
def quickMenu(self):
"""
Return a :class:`~.quickmenu.QuickMenu` popup menu instance for
new node creation.
"""
if self.__quickMenu is None:
menu = quickmenu.QuickMenu(self)
if self.__registry is not None:
menu.setModel(self.__registry.model())
self.__quickMenu = menu
return self.__quickMenu
def setTitle(self, title):
"""
Set the scheme title.
"""
self.__undoStack.push(
commands.SetAttrCommand(self.__scheme, "title", title)
)
def setDescription(self, description):
"""
Set the scheme description string.
"""
self.__undoStack.push(
commands.SetAttrCommand(self.__scheme, "description", description)
)
def addNode(self, node):
"""
Add a new node (:class:`.SchemeNode`) to the document.
"""
command = commands.AddNodeCommand(self.__scheme, node)
self.__undoStack.push(command)
def createNewNode(self, description, title=None, position=None):
"""
Create a new :class:`.SchemeNode` and add it to the document.
The new node is constructed using :func:`newNodeHelper` method.
"""
node = self.newNodeHelper(description, title, position)
self.addNode(node)
return node
def newNodeHelper(self, description, title=None, position=None):
"""
Return a new initialized :class:`.SchemeNode`. If `title`
and `position` are not supplied they are initialized to sensible
defaults.
"""
if title is None:
title = self.enumerateTitle(description.name)
if position is None:
position = self.nextPosition()
return SchemeNode(description, title=title, position=position)
def enumerateTitle(self, title):
"""
Enumerate a `title` string (i.e. add a number in parentheses) so
it is not equal to any node title in the current scheme.
"""
curr_titles = set([node.title for node in self.scheme().nodes])
template = title + " ({0})"
enumerated = map(template.format, itertools.count(1))
candidates = itertools.chain([title], enumerated)
seq = itertools.dropwhile(curr_titles.__contains__, candidates)
return next(seq)
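# Illustrative example: with existing node titles {"File", "File (1)"},
# enumerateTitle("File") returns "File (2)"; with no clash the original
# title is returned unchanged.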
def nextPosition(self):
"""
Return the next default node position as an (x, y) tuple. This is
a position to the right of the last added node.
"""
nodes = self.scheme().nodes
if nodes:
x, y = nodes[-1].position
position = (x + 150, y)
else:
position = (150, 150)
return position
def removeNode(self, node):
"""
Remove a `node` (:class:`.SchemeNode`) from the scheme
"""
command = commands.RemoveNodeCommand(self.__scheme, node)
self.__undoStack.push(command)
def renameNode(self, node, title):
"""
Rename a `node` (:class:`.SchemeNode`) to `title`.
"""
command = commands.RenameNodeCommand(self.__scheme, node, title)
self.__undoStack.push(command)
def addLink(self, link):
"""
Add a `link` (:class:`.SchemeLink`) to the scheme.
"""
command = commands.AddLinkCommand(self.__scheme, link)
self.__undoStack.push(command)
def removeLink(self, link):
"""
Remove a link (:class:`.SchemeLink`) from the scheme.
"""
command = commands.RemoveLinkCommand(self.__scheme, link)
self.__undoStack.push(command)
def addAnnotation(self, annotation):
"""
Add `annotation` (:class:`.BaseSchemeAnnotation`) to the scheme
"""
command = commands.AddAnnotationCommand(self.__scheme, annotation)
self.__undoStack.push(command)
def removeAnnotation(self, annotation):
"""
Remove `annotation` (:class:`.BaseSchemeAnnotation`) from the scheme.
"""
command = commands.RemoveAnnotationCommand(self.__scheme, annotation)
self.__undoStack.push(command)
def removeSelected(self):
"""
Remove all selected items in the scheme.
"""
selected = self.scene().selectedItems()
if not selected:
return
self.__undoStack.beginMacro(self.tr("Remove"))
for item in selected:
if isinstance(item, items.NodeItem):
node = self.scene().node_for_item(item)
self.__undoStack.push(
commands.RemoveNodeCommand(self.__scheme, node)
)
elif isinstance(item, items.annotationitem.Annotation):
annot = self.scene().annotation_for_item(item)
self.__undoStack.push(
commands.RemoveAnnotationCommand(self.__scheme, annot)
)
self.__undoStack.endMacro()
def selectAll(self):
"""
Select all selectable items in the scheme.
"""
for item in self.__scene.items():
if item.flags() & QGraphicsItem.ItemIsSelectable:
item.setSelected(True)
def toggleZoom(self, zoom):
"""
Toggle view zoom. If `zoom` is True the scheme is displayed
scaled to 150%.
"""
view = self.view()
if zoom:
view.scale(1.5, 1.5)
else:
view.resetTransform()
def alignToGrid(self):
"""
Align nodes to a grid.
"""
# TODO: The current layout implementation is BAD (fix is urgent).
tile_size = 150
tiles = {}
nodes = sorted(self.scheme().nodes, key=attrgetter("position"))
if nodes:
self.__undoStack.beginMacro(self.tr("Align To Grid"))
for node in nodes:
x, y = node.position
x = int(round(float(x) / tile_size) * tile_size)
y = int(round(float(y) / tile_size) * tile_size)
while (x, y) in tiles:
x += tile_size
self.__undoStack.push(
commands.MoveNodeCommand(self.scheme(), node,
node.position, (x, y))
)
tiles[x, y] = node
self.__scene.item_for_node(node).setPos(x, y)
self.__undoStack.endMacro()
def focusNode(self):
"""
Return the current focused :class:`.SchemeNode` or ``None`` if no
node has focus.
"""
focus = self.__scene.focusItem()
node = None
if isinstance(focus, items.NodeItem):
try:
node = self.__scene.node_for_item(focus)
except KeyError:
# in case the node has been removed but the scene was not
# yet fully updated.
node = None
return node
def selectedNodes(self):
"""
Return all selected :class:`.SchemeNode` items.
"""
return list(map(self.scene().node_for_item,
self.scene().selected_node_items()))
def selectedAnnotations(self):
"""
Return all selected :class:`.BaseSchemeAnnotation` items.
"""
return list(map(self.scene().annotation_for_item,
self.scene().selected_annotation_items()))
def openSelected(self):
"""
Open (show and raise) all widgets for the current selected nodes.
"""
selected = self.scene().selected_node_items()
for item in selected:
self.__onNodeActivate(item)
def editNodeTitle(self, node):
"""
Edit (rename) the `node`'s title. Opens an input dialog.
"""
name, ok = QInputDialog.getText(
self, self.tr("Rename"),
str(self.tr("Enter a new name for the '%s' widget")) \
% node.title,
text=node.title
)
if ok:
self.__undoStack.push(
commands.RenameNodeCommand(self.__scheme, node, node.title,
str(name))
)
def __onCleanChanged(self, clean):
if self.isWindowModified() != (not clean):
self.setWindowModified(not clean)
self.modificationChanged.emit(not clean)
def changeEvent(self, event):
if event.type() == QEvent.FontChange:
self.__updateFont()
QWidget.changeEvent(self, event)
def eventFilter(self, obj, event):
# Filter the scene's drag/drop events.
if obj is self.scene():
etype = event.type()
if etype == QEvent.GraphicsSceneDragEnter or \
etype == QEvent.GraphicsSceneDragMove:
mime_data = event.mimeData()
if mime_data.hasFormat(
"application/vnv.orange-canvas.registry.qualified-name"
):
event.acceptProposedAction()
else:
event.ignore()
return True
elif etype == QEvent.GraphicsSceneDrop:
data = event.mimeData()
qname = data.data(
"application/vnv.orange-canvas.registry.qualified-name"
)
try:
desc = self.__registry.widget(bytes(qname).decode())
except KeyError:
log.error("Unknown qualified name '%s'", qname)
else:
pos = event.scenePos()
self.createNewNode(desc, position=(pos.x(), pos.y()))
return True
elif etype == QEvent.GraphicsSceneMousePress:
return self.sceneMousePressEvent(event)
elif etype == QEvent.GraphicsSceneMouseMove:
return self.sceneMouseMoveEvent(event)
elif etype == QEvent.GraphicsSceneMouseRelease:
return self.sceneMouseReleaseEvent(event)
elif etype == QEvent.GraphicsSceneMouseDoubleClick:
return self.sceneMouseDoubleClickEvent(event)
elif etype == QEvent.KeyPress:
return self.sceneKeyPressEvent(event)
elif etype == QEvent.KeyRelease:
return self.sceneKeyReleaseEvent(event)
elif etype == QEvent.GraphicsSceneContextMenu:
return self.sceneContextMenuEvent(event)
elif obj is self.__scheme:
if event.type() == QEvent.WhatsThisClicked:
# Re-post the event.
self.__showHelpFor(event.href())
elif event.type() == \
widgetsscheme.ActivateParentEvent.ActivateParent:
self.window().activateWindow()
self.window().raise_()
return QWidget.eventFilter(self, obj, event)
def sceneMousePressEvent(self, event):
scene = self.__scene
if scene.user_interaction_handler:
return False
pos = event.scenePos()
anchor_item = scene.item_at(pos, items.NodeAnchorItem,
buttons=Qt.LeftButton)
if anchor_item and event.button() == Qt.LeftButton:
# Start a new link starting at item
scene.clearSelection()
handler = interactions.NewLinkAction(self)
self._setUserInteractionHandler(handler)
return handler.mousePressEvent(event)
any_item = scene.item_at(pos)
if not any_item:
self.__emptyClickButtons |= event.button()
if not any_item and event.button() == Qt.LeftButton:
# Create a RectangleSelectionAction but do not set it on the scene
# just yet (instead wait for the mouse move event).
handler = interactions.RectangleSelectionAction(self)
rval = handler.mousePressEvent(event)
if rval == True:
self.__possibleSelectionHandler = handler
return rval
if any_item and event.button() == Qt.LeftButton:
self.__possibleMouseItemsMove = True
self.__itemsMoving.clear()
self.__scene.node_item_position_changed.connect(
self.__onNodePositionChanged
)
self.__annotationGeomChanged.mapped[QObject].connect(
self.__onAnnotationGeometryChanged
)
set_enabled_all(self.__disruptiveActions, False)
return False
def sceneMouseMoveEvent(self, event):
scene = self.__scene
if scene.user_interaction_handler:
return False
if self.__emptyClickButtons & Qt.LeftButton and \
event.buttons() & Qt.LeftButton and \
self.__possibleSelectionHandler:
# Set the RectangleSelection (initialized in mousePressEvent)
# on the scene
handler = self.__possibleSelectionHandler
self._setUserInteractionHandler(handler)
self.__possibleSelectionHandler = None
return handler.mouseMoveEvent(event)
return False
def sceneMouseReleaseEvent(self, event):
scene = self.__scene
if scene.user_interaction_handler:
return False
if event.button() == Qt.LeftButton and self.__possibleMouseItemsMove:
self.__possibleMouseItemsMove = False
self.__scene.node_item_position_changed.disconnect(
self.__onNodePositionChanged
)
self.__annotationGeomChanged.mapped[QObject].disconnect(
self.__onAnnotationGeometryChanged
)
set_enabled_all(self.__disruptiveActions, True)
if self.__itemsMoving:
self.__scene.mouseReleaseEvent(event)
stack = self.undoStack()
stack.beginMacro(self.tr("Move"))
for scheme_item, (old, new) in self.__itemsMoving.items():
if isinstance(scheme_item, SchemeNode):
command = commands.MoveNodeCommand(
self.scheme(), scheme_item, old, new
)
elif isinstance(scheme_item, BaseSchemeAnnotation):
command = commands.AnnotationGeometryChange(
self.scheme(), scheme_item, old, new
)
else:
continue
stack.push(command)
stack.endMacro()
self.__itemsMoving.clear()
return True
elif event.button() == Qt.LeftButton:
self.__possibleSelectionHandler = None
return False
def sceneMouseDoubleClickEvent(self, event):
scene = self.__scene
if scene.user_interaction_handler:
return False
item = scene.item_at(event.scenePos())
if not item and self.__quickMenuTriggers & \
SchemeEditWidget.DoubleClicked:
# Double click on an empty spot
# Create a new node using QuickMenu
action = interactions.NewNodeAction(self)
with disabled(self.__undoAction), disabled(self.__redoAction):
action.create_new(event.screenPos())
event.accept()
return True
item = scene.item_at(event.scenePos(), items.LinkItem,
buttons=Qt.LeftButton)
if item is not None and event.button() == Qt.LeftButton:
link = self.scene().link_for_item(item)
action = interactions.EditNodeLinksAction(self, link.source_node,
link.sink_node)
action.edit_links()
event.accept()
return True
return False
def sceneKeyPressEvent(self, event):
scene = self.__scene
if scene.user_interaction_handler:
return False
# If a QGraphicsItem is in text editing mode, don't interrupt it
focusItem = scene.focusItem()
if focusItem and isinstance(focusItem, QGraphicsTextItem) and \
focusItem.textInteractionFlags() & Qt.TextEditable:
return False
# If the mouse is not over our view
if not self.view().underMouse():
return False
handler = None
searchText = ""
if (event.key() == Qt.Key_Space and \
self.__quickMenuTriggers & SchemeEditWidget.SpaceKey):
handler = interactions.NewNodeAction(self)
elif len(event.text()) and \
self.__quickMenuTriggers & SchemeEditWidget.AnyKey and \
is_printable(str(event.text())[0]):
handler = interactions.NewNodeAction(self)
searchText = str(event.text())
# TODO: set the search text to event.text() and set focus on the
# search line
if handler is not None:
# Control + Backspace (the remove-widget shortcut on Mac OS X) conflicts
# with the 'Clear text' action in the search widget (there might be
# selected items in the canvas), so we disable the remove-widget action
# while the quick menu is open so that text editing follows the standard
# 'look and feel'.
with disabled(self.__removeSelectedAction), \
disabled(self.__undoAction), \
disabled(self.__redoAction):
handler.create_new(QCursor.pos(), searchText)
event.accept()
return True
return False
def sceneKeyReleaseEvent(self, event):
return False
def sceneContextMenuEvent(self, event):
return False
def _setUserInteractionHandler(self, handler):
"""
Set the current user interaction handler, first committing and
disconnecting any annotation-resize handler that is still active, and
disabling actions that could modify the model while the interaction runs.
"""
if self.__scene.user_interaction_handler:
if isinstance(self.__scene.user_interaction_handler,
(interactions.ResizeArrowAnnotation,
interactions.ResizeTextAnnotation)):
self.__scene.user_interaction_handler.commit()
self.__scene.user_interaction_handler.ended.disconnect(
self.__onInteractionEnded
)
if handler:
handler.ended.connect(self.__onInteractionEnded)
# Disable actions which could change the model
set_enabled_all(self.__disruptiveActions, False)
self.__scene.set_user_interaction_handler(handler)
def __onInteractionEnded(self):
self.sender().ended.disconnect(self.__onInteractionEnded)
set_enabled_all(self.__disruptiveActions, True)
def __onSelectionChanged(self):
nodes = self.selectedNodes()
annotations = self.selectedAnnotations()
self.__openSelectedAction.setEnabled(bool(nodes))
self.__removeSelectedAction.setEnabled(
bool(nodes) or bool(annotations)
)
self.__helpAction.setEnabled(len(nodes) == 1)
self.__renameAction.setEnabled(len(nodes) == 1)
self.__duplicateSelectedAction.setEnabled(bool(nodes))
if len(nodes) > 1:
self.__openSelectedAction.setText(self.tr("Open All"))
else:
self.__openSelectedAction.setText(self.tr("Open"))
if len(nodes) + len(annotations) > 1:
self.__removeSelectedAction.setText(self.tr("Remove All"))
else:
self.__removeSelectedAction.setText(self.tr("Remove"))
if len(nodes) == 0:
self.__openSelectedAction.setText(self.tr("Open"))
self.__removeSelectedAction.setText(self.tr("Remove"))
focus = self.focusNode()
if focus is not None:
desc = focus.description
tip = whats_this_helper(desc, include_more_link=True)
else:
tip = ""
if tip != self.__quickTip:
self.__quickTip = tip
ev = QuickHelpTipEvent("", self.__quickTip,
priority=QuickHelpTipEvent.Permanent)
QCoreApplication.sendEvent(self, ev)
def __onNodeActivate(self, item):
node = self.__scene.node_for_item(item)
widget = self.scheme().widget_for_node(node)
widget.show()
widget.raise_()
widget.activateWindow()
def __onNodePositionChanged(self, item, pos):
node = self.__scene.node_for_item(item)
new = (pos.x(), pos.y())
if node not in self.__itemsMoving:
self.__itemsMoving[node] = (node.position, new)
else:
old, _ = self.__itemsMoving[node]
self.__itemsMoving[node] = (old, new)
def __onAnnotationGeometryChanged(self, item):
annot = self.scene().annotation_for_item(item)
if annot not in self.__itemsMoving:
self.__itemsMoving[annot] = (annot.geometry,
geometry_from_annotation_item(item))
else:
old, _ = self.__itemsMoving[annot]
self.__itemsMoving[annot] = (old,
geometry_from_annotation_item(item))
def __onAnnotationAdded(self, item):
log.debug("Annotation added (%r)", item)
item.setFlag(QGraphicsItem.ItemIsSelectable)
item.setFlag(QGraphicsItem.ItemIsMovable)
item.setFlag(QGraphicsItem.ItemIsFocusable)
item.installSceneEventFilter(self.__focusListener)
if isinstance(item, items.ArrowAnnotation):
pass
elif isinstance(item, items.TextAnnotation):
# Make the annotation editable.
item.setTextInteractionFlags(Qt.TextEditorInteraction)
self.__editFinishedMapper.setMapping(item, item)
item.editingFinished.connect(
self.__editFinishedMapper.map
)
self.__annotationGeomChanged.setMapping(item, item)
item.geometryChanged.connect(
self.__annotationGeomChanged.map
)
def __onAnnotationRemoved(self, item):
log.debug("Annotation removed (%r)", item)
if isinstance(item, items.ArrowAnnotation):
pass
elif isinstance(item, items.TextAnnotation):
item.editingFinished.disconnect(
self.__editFinishedMapper.map
)
item.removeSceneEventFilter(self.__focusListener)
self.__annotationGeomChanged.removeMappings(item)
item.geometryChanged.disconnect(
self.__annotationGeomChanged.map
)
def __onItemFocusedIn(self, item):
"""
Annotation item has gained focus.
"""
if not self.__scene.user_interaction_handler:
self.__startControlPointEdit(item)
def __onItemFocusedOut(self, item):
"""
Annotation item lost focus.
"""
self.__endControlPointEdit()
def __onEditingFinished(self, item):
"""
Text annotation editing has finished.
"""
annot = self.__scene.annotation_for_item(item)
text = str(item.toPlainText())
if annot.text != text:
self.__undoStack.push(
commands.TextChangeCommand(self.scheme(), annot,
annot.text, text)
)
def __toggleNewArrowAnnotation(self, checked):
if self.__newTextAnnotationAction.isChecked():
# Uncheck the text annotation action if needed.
self.__newTextAnnotationAction.setChecked(not checked)
action = self.__newArrowAnnotationAction
if not checked:
# The action was unchecked (canceled by the user)
handler = self.__scene.user_interaction_handler
if isinstance(handler, interactions.NewArrowAnnotation):
# Cancel the interaction and restore the state
handler.ended.disconnect(action.toggle)
handler.cancel(interactions.UserInteraction.UserCancelReason)
log.info("Canceled new arrow annotation")
else:
handler = interactions.NewArrowAnnotation(self)
checked = self.__arrowColorActionGroup.checkedAction()
handler.setColor(checked.data())
handler.ended.connect(action.toggle)
self._setUserInteractionHandler(handler)
def __onFontSizeTriggered(self, action):
if not self.__newTextAnnotationAction.isChecked():
# When selecting from the (font size) menu the 'Text'
# action does not get triggered automatically.
self.__newTextAnnotationAction.trigger()
else:
# Update the preferred font on the interaction handler.
handler = self.__scene.user_interaction_handler
if isinstance(handler, interactions.NewTextAnnotation):
handler.setFont(action.font())
def __toggleNewTextAnnotation(self, checked):
if self.__newArrowAnnotationAction.isChecked():
# Uncheck the arrow annotation if needed.
self.__newArrowAnnotationAction.setChecked(not checked)
action = self.__newTextAnnotationAction
if not checked:
# The action was unchecked (canceled by the user)
handler = self.__scene.user_interaction_handler
if isinstance(handler, interactions.NewTextAnnotation):
# cancel the interaction and restore the state
handler.ended.disconnect(action.toggle)
handler.cancel(interactions.UserInteraction.UserCancelReason)
log.info("Canceled new text annotation")
else:
handler = interactions.NewTextAnnotation(self)
checked = self.__fontActionGroup.checkedAction()
handler.setFont(checked.font())
handler.ended.connect(action.toggle)
self._setUserInteractionHandler(handler)
def __onArrowColorTriggered(self, action):
if not self.__newArrowAnnotationAction.isChecked():
# When selecting from the (color) menu the 'Arrow'
# action does not get triggered automatically.
self.__newArrowAnnotationAction.trigger()
else:
# Update the preferred color on the interaction handler
handler = self.__scene.user_interaction_handler
if isinstance(handler, interactions.NewArrowAnnotation):
handler.setColor(action.data())
def __onCustomContextMenuRequested(self, pos):
scenePos = self.view().mapToScene(pos)
globalPos = self.view().mapToGlobal(pos)
item = self.scene().item_at(scenePos, items.NodeItem)
if item is not None:
self.__widgetMenu.popup(globalPos)
return
item = self.scene().item_at(scenePos, items.LinkItem,
buttons=Qt.RightButton)
if item is not None:
link = self.scene().link_for_item(item)
self.__linkEnableAction.setChecked(link.enabled)
self.__contextMenuTarget = link
self.__linkMenu.popup(globalPos)
return
item = self.scene().item_at(scenePos)
if not item and \
self.__quickMenuTriggers & SchemeEditWidget.RightClicked:
action = interactions.NewNodeAction(self)
with disabled(self.__undoAction), disabled(self.__redoAction):
action.create_new(globalPos)
return
def __onRenameAction(self):
"""
Rename was requested for the selected widget.
"""
selected = self.selectedNodes()
if len(selected) == 1:
self.editNodeTitle(selected[0])
def __onHelpAction(self):
"""
Help was requested for the selected widget.
"""
nodes = self.selectedNodes()
help_url = None
if len(nodes) == 1:
node = nodes[0]
desc = node.description
help_url = "help://search?" + urlencode({"id": desc.qualified_name})
self.__showHelpFor(help_url)
def __showHelpFor(self, help_url):
"""
Show help for an "help" url.
"""
# Notify the parent chain and let them respond
ev = QWhatsThisClickedEvent(help_url)
handled = QCoreApplication.sendEvent(self, ev)
if not handled:
message_information(
self.tr("There is no documentation for this widget yet."),
parent=self)
def __toggleLinkEnabled(self, enabled):
"""
Link 'enabled' state was toggled in the context menu.
"""
if self.__contextMenuTarget:
link = self.__contextMenuTarget
command = commands.SetAttrCommand(
link, "enabled", enabled, name=self.tr("Set enabled"),
)
self.__undoStack.push(command)
def __linkRemove(self):
"""
Remove link was requested from the context menu.
"""
if self.__contextMenuTarget:
self.removeLink(self.__contextMenuTarget)
def __linkReset(self):
"""
Link reset from the context menu was requested.
"""
if self.__contextMenuTarget:
link = self.__contextMenuTarget
action = interactions.EditNodeLinksAction(
self, link.source_node, link.sink_node
)
action.edit_links()
def __duplicateSelected(self):
"""
Duplicate currently selected nodes.
"""
def copy_node(node):
x, y = node.position
return SchemeNode(
node.description, node.title, position=(x + 20, y + 20),
properties=copy.deepcopy(node.properties))
def copy_link(link, source=None, sink=None):
source = link.source_node if source is None else source
sink = link.sink_node if sink is None else sink
return SchemeLink(
source, link.source_channel,
sink, link.sink_channel,
enabled=link.enabled,
properties=copy.deepcopy(link.properties))
scheme = self.scheme()
# ensure up to date node properties (settings)
scheme.sync_node_properties()
selection = self.selectedNodes()
links = [link for link in scheme.links
if link.source_node in selection and
link.sink_node in selection]
nodedups = [copy_node(node) for node in selection]
allnames = {node.title for node in scheme.nodes + nodedups}
for nodedup in nodedups:
nodedup.title = uniquify(
nodedup.title, allnames, pattern="{item} ({_})", start=1)
node_to_dup = dict(zip(selection, nodedups))
linkdups = [copy_link(link, source=node_to_dup[link.source_node],
sink=node_to_dup[link.sink_node])
for link in links]
command = QUndoCommand(self.tr("Duplicate"))
macrocommands = []
for nodedup in nodedups:
macrocommands.append(
commands.AddNodeCommand(scheme, nodedup, parent=command))
for linkdup in linkdups:
macrocommands.append(
commands.AddLinkCommand(scheme, linkdup, parent=command))
self.__undoStack.push(command)
scene = self.__scene
for node in selection:
item = scene.item_for_node(node)
item.setSelected(False)
for node in nodedups:
item = scene.item_for_node(node)
item.setSelected(True)
def __startControlPointEdit(self, item):
"""
Start a control point edit interaction for `item`.
"""
if isinstance(item, items.ArrowAnnotation):
handler = interactions.ResizeArrowAnnotation(self)
elif isinstance(item, items.TextAnnotation):
handler = interactions.ResizeTextAnnotation(self)
else:
log.warning("Unknown annotation item type %r" % item)
return
handler.editItem(item)
self._setUserInteractionHandler(handler)
log.info("Control point editing started (%r)." % item)
def __endControlPointEdit(self):
"""
End the current control point edit interaction.
"""
handler = self.__scene.user_interaction_handler
if isinstance(handler, (interactions.ResizeArrowAnnotation,
interactions.ResizeTextAnnotation)) and \
not handler.isFinished() and not handler.isCanceled():
handler.commit()
handler.end()
log.info("Control point editing finished.")
def __updateFont(self):
"""
Update the font for the 'Text size' menu and the default font
used in the `CanvasScene`.
"""
actions = self.__fontActionGroup.actions()
font = self.font()
for action in actions:
size = action.font().pixelSize()
action_font = QFont(font)
action_font.setPixelSize(size)
action.setFont(action_font)
if self.__scene:
self.__scene.setFont(font)
def __signalManagerStateChanged(self, state):
if state == signalmanager.SignalManager.Running:
self.__view.setBackgroundBrush(QBrush(Qt.NoBrush))
# self.__view.setBackgroundIcon(QIcon())
elif state == signalmanager.SignalManager.Paused:
self.__view.setBackgroundBrush(QBrush(QColor(235, 235, 235)))
# self.__view.setBackgroundIcon(QIcon("canvas_icons:Pause.svg"))
def geometry_from_annotation_item(item):
if isinstance(item, items.ArrowAnnotation):
line = item.line()
p1 = item.mapToScene(line.p1())
p2 = item.mapToScene(line.p2())
return ((p1.x(), p1.y()), (p2.x(), p2.y()))
elif isinstance(item, items.TextAnnotation):
geom = item.geometry()
return (geom.x(), geom.y(), geom.width(), geom.height())
def mouse_drag_distance(event, button=Qt.LeftButton):
"""
Return the (manhattan) distance between the mouse position
when the `button` was pressed and the current mouse position.
"""
diff = (event.buttonDownScreenPos(button) - event.screenPos())
return diff.manhattanLength()
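# Illustrative use (an assumption about callers, not something this module
# enforces): a press-and-move is usually treated as a real drag once
#   mouse_drag_distance(event, Qt.LeftButton) > QApplication.startDragDistance()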
def set_enabled_all(objects, enable):
"""
Set the `enabled` property on all objects (any object with a `setEnabled` method).
"""
for obj in objects:
obj.setEnabled(enable)
# All control character categories.
_control = set(["Cc", "Cf", "Cs", "Co", "Cn"])
def is_printable(unichar):
"""
Return True if the unicode character `unichar` is a printable character.
"""
return unicodedata.category(unichar) not in _control
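# For example, is_printable("a") is True, while is_printable("\t") is False
# because "\t" falls into the "Cc" (control) category.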
def node_properties(scheme):
scheme.sync_node_properties()
return [dict(node.properties) for node in scheme.nodes]
def uniquify(item, names, pattern="{item}-{_}", start=0):
candidates = (pattern.format(item=item, _=i)
for i in itertools.count(start))
candidates = itertools.dropwhile(lambda item: item in names, candidates)
return next(candidates)
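# Example (with the pattern used by __duplicateSelected above):
#   uniquify("File", {"File", "File (1)"}, pattern="{item} ({_})", start=1)
# returns "File (2)", the first candidate not already present in `names`.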
|
|
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.messages.primarywindow
#
# ----------------------------------------------------------------------------
"""
Implements the various Message windows.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import logging
from enso import graphics
from enso.graphics import xmltextlayout
from enso.graphics.measurement import inchesToPoints
from enso.graphics import rounded_rect
from enso.utils.xml_tools import escape_xml
from enso.messages.windows import MessageWindow, computeWidth
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
# Total length of time from dismissal to full fade-out (in ms)
ANIMATION_TIME = 250
# Amount of time (in ms) to wait after primary message creation before
# allowing dismissal events to trigger the animation
WAIT_TIME = 80
# ----------------------------------------------------------------------------
# Visual Layout Constants
# ----------------------------------------------------------------------------
# The width, height, and margins of primary messages.
PRIM_MSG_WIDTH = inchesToPoints( 8 )
MAX_MSG_HEIGHT = inchesToPoints( 6 )
PRIM_MSG_MARGIN = inchesToPoints( .2 )
MSG_BGCOLOR = [ .2, .2, .2, .85 ]
# Text sizes for main text and captions.
SCALE = [
( 20, 12 ),
( 24, 14 ),
( 30, 18 ),
]
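# Each SCALE entry is a (primary text size, caption text size) pair in points;
# PrimaryMsgWind.__layoutText tries the larger pairs first and falls back to
# SCALE[0] with ellipsis when nothing fits.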
PRIM_TEXT_SIZE = 24
CAPTION_TEXT_SIZE = 16
LINE_SPACING = 1
# Distance between the main text block and the caption block.
CAPTION_OFFSET = 0
# ----------------------------------------------------------------------------
# The Primary Message Window class
# ----------------------------------------------------------------------------
class PrimaryMsgWind( MessageWindow ):
"""
Class that implements the primary message singleton's appearance
and behavior.
Essentially, setMessage() sets the current primary message.
Immediately after the message is set, it is rendered, and the
class goes into a brief wait cycle, so that user actions don't make
the message disappear before it can be seen.
After this wait cycle is completed, the singleton registers itself
as a responder to dismissal events. When a dismissal event
happens, the singleton animates the fading out of the primary message.
Also, the singleton notifies the message manager that the primary
message has been dismissed.
"""
def __init__( self, msgMan, eventManager ):
"""
Initializes the PrimaryMessage singleton
"""
# Instantiate the underlying MessageWindow to the
# maxsize suggested by the module constants.
width = min( PRIM_MSG_WIDTH,
graphics.getDesktopSize()[0]-1 )
height = min( MAX_MSG_HEIGHT,
graphics.getDesktopSize()[1]-1 )
maxSize = ( width, height )
MessageWindow.__init__( self, maxSize )
self.__evtManager = eventManager
self.__msgManager = msgMan
self.__msg = None
self.__waiting = False
self.__animating = False
def setMessage( self, message ):
"""
Sets the current primary message to "message".
"""
if self.__msg is not None:
# If there already is a primary message, then "interrupt" it:
self.__interrupt()
# Set the current primary message, and draw it.
self.__msg = message
self.__drawMessage()
# Now, set a time-responder to wait for a bit, so that the
# user doesn't accidentally clear the message before it registers
# as existing.
self.__timeSinceCreated = 0
self.__evtManager.registerResponder( self.waitTick, "timer" )
self.__waiting = True
def onDismissal( self ):
"""
Called on a dismissal event, to start the animation process
and make sure the underlying message does what it needs to
when it ceases being a primary message.
"""
self.__msgManager.onDismissal()
self.__evtManager.removeResponder( self.onDismissal )
self.__timeSinceDismissal = 0
self.__evtManager.registerResponder( self.animationTick, "timer" )
self.__animating = True
def animationTick( self, msPassed ):
"""
Called on a timer event to animate the window's fadeout.
"""
self.__timeSinceDismissal += msPassed
if self.__timeSinceDismissal > ANIMATION_TIME:
self.__onAnimationFinished()
return
timeLeft = ANIMATION_TIME - self.__timeSinceDismissal
frac = timeLeft / float(ANIMATION_TIME)
opacity = int( 255*frac )
self._wind.setOpacity( opacity )
self._wind.update()
def waitTick( self, msPassed ):
"""
Called on a timer event, to give some time between the message
appearing and when it can disappear.
"""
self.__timeSinceCreated += msPassed
if self.__timeSinceCreated > WAIT_TIME:
self.__evtManager.registerResponder( self.onDismissal,
"dismissal" )
self.__evtManager.removeResponder( self.waitTick )
self.__waiting = False
# The following message may be used by system tests.
logging.info( "newMessage: %s" % self.__msg.getPrimaryXml() )
def __position( self ):
"""
Centers the message window horizontally using the current size.
"""
desksize = graphics.getDesktopSize()
left, top = graphics.getDesktopOffset()
xPos = ((desksize[0] - self.getSize()[0]) / 2) + left
# Set the height based on the "maximum" height, so that the
# message always appears at the same vertical offset from the
# top of the screen.
yPos = ( desksize[1] - self.getMaxSize()[1] ) / 2
self.setPos( xPos, yPos )
def __interrupt( self ):
"""
"interrupts" the current primary message, terminating
its animation and/or wait cycle and removing any registered event responders.
"""
if self.__msg is not None:
# If there's an old message, then we've got an
# event responder registered:
if self.__waiting:
self.__evtManager.removeResponder( self.waitTick )
self.__waiting = False
if self.__animating:
self.__evtManager.removeResponder( self.animationTick )
self.__animating = False
else:
self.__evtManager.removeResponder( self.onDismissal )
if self.__waiting:
self.__evtManager.removeResponder( self.waitTick )
def __drawMessage( self ):
"""
Draws the current message to the underlying Cairo context.
"""
# This function is the master drawing function; all layout and
# rendering methods are called from here.
text = self.__msg.getPrimaryXml()
self.clearWindow()
msgText, capText = splitContent( text )
width,height = self.getMaxSize()
width -= 2*PRIM_MSG_MARGIN
height -= 2*PRIM_MSG_MARGIN
msgDoc, capDoc = self.__layoutText( msgText,
capText,
width,
height )
width, height, msgPos, capPos = \
self.__layoutBlocks( msgDoc, capDoc )
# Set the window size and draw the outlining rectangle
self.__setupBackground( width, height )
# Draw the text.
msgDoc.draw( msgPos[0], msgPos[1], self._context )
if capDoc is not None:
capDoc.draw( capPos[0], capPos[1], self._context )
# Set the window opacity (which can be left at 0 by the animation)
self._wind.setOpacity( 255 )
# Show and update the window.
self.show()
def __isOneLineMsg( self, msgDoc, capDoc ):
"""
Determines whether msgDoc and capDoc are both one line.
"""
numMsgLines = 0
for block in ( msgDoc.blocks ):
numMsgLines += len( block.lines )
numCapLines = 0
for block in ( capDoc.blocks ):
numCapLines += len( block.lines )
return (numCapLines == 1 and numMsgLines == 1)
def __layoutText( self, msgText, capText, width, height ):
"""
Lays out msgText and capText into two separate document
objects.
Returns a tuple: ( msgDoc, capDoc )
NOTE: capDoc can be None, if capText is None.
"""
root = "<document>%s</document>"
for msgSize, capSize in reversed( SCALE[1:] ):
try:
msgDoc = layoutMessageXml(
xmlMarkup = root % msgText,
width = width,
height = height,
size = msgSize,
)
if capText is not None:
capDoc = layoutMessageXml(
xmlMarkup = root % capText,
width = width,
height = height - msgDoc.height,
size = capSize
)
else:
capDoc = None
return msgDoc, capDoc
except Exception:
# TODO: Lookup exact error.
pass
# This time, ellipsify.
msgSize, capSize = SCALE[0]
msgDoc = layoutMessageXml(
xmlMarkup = root % msgText,
width = width,
height = height * .8,
size = msgSize,
ellipsify = "true",
)
if capText is not None:
capDoc = layoutMessageXml(
xmlMarkup = root % capText,
width = width,
height = height * .2,
size = capSize,
ellipsify = "true",
)
else:
capDoc = None
return msgDoc, capDoc
def __setupBackground( self, width, height ):
"""
Given a text region of width and height, sets the size of the
underlying window to be that plus margins, and draws a rounded
background rectangle.
"""
width += (2*PRIM_MSG_MARGIN)-2
height += (2*PRIM_MSG_MARGIN)-2
width = int(width)
height = int(height)
assert width <= self.getMaxSize()[0], \
"width %s, self.getMaxSize()[0] %s" \
% (width, self.getMaxSize()[0])
self.setSize( width, height )
self.__position()
cr = self._context
rounded_rect.drawRoundedRect(
context = cr,
rect = ( 0, 0, width, height ),
softenedCorners = rounded_rect.ALL_CORNERS,
)
cr.set_source_rgba( *MSG_BGCOLOR )
cr.fill_preserve()
def __layoutBlocks( self, messageDoc, captionDoc ):
"""
Determines how the documents messageDoc and captionDoc should
be combined to form a complete message window.
Returns a tuple:
( width, height, messagePosition, captionPosition )
"""
capDoc, msgDoc = captionDoc, messageDoc
if capDoc is None:
width = computeWidth( msgDoc )
height = msgDoc.height
msgPos = ( PRIM_MSG_MARGIN, PRIM_MSG_MARGIN )
capPos = None
elif self.__isOneLineMsg( msgDoc, capDoc ):
msgWidth = computeWidth( msgDoc )
capWidth = computeWidth( capDoc )
width = max( msgWidth, capWidth )
height = msgDoc.height + capDoc.height
msgPos = ( PRIM_MSG_MARGIN + ( ( width - msgWidth ) / 2 ),
PRIM_MSG_MARGIN )
capPos = ( PRIM_MSG_MARGIN + ( ( width - capWidth ) / 2 ),
msgPos[1] + msgDoc.height )
else:
msgWidth = computeWidth( msgDoc )
capWidth = computeWidth( capDoc )
width = max( msgWidth, capWidth )
height = msgDoc.height + capDoc.height
msgPos = ( PRIM_MSG_MARGIN, PRIM_MSG_MARGIN )
capPos = ( width - capWidth + PRIM_MSG_MARGIN,
msgPos[1] + msgDoc.height )
return width, height, msgPos, capPos
def __onAnimationFinished( self ):
"""
Called when the animation is finished.
"""
if self.__animating:
self.__evtManager.removeResponder( self.animationTick )
self.__animating = False
self.hide()
self.__msg = None
self.__msgManager.onPrimaryMessageFinished()
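# Rough usage sketch (hypothetical manager/event objects; the real ones come
# from the enso message and event frameworks):
#
#   window = PrimaryMsgWind( msgManager, eventManager )
#   window.setMessage( message )  # draws the message, then waits WAIT_TIME ms
#   # ... a later "dismissal" event triggers onDismissal(), which fades the
#   # window out over ANIMATION_TIME ms and notifies the message manager.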
# ----------------------------------------------------------------------------
# Xml Layout
# ----------------------------------------------------------------------------
# The master style registry for primary messages.
_styles = xmltextlayout.StyleRegistry()
_styles.add(
"document",
margin_top = "0.0pt",
margin_bottom = "0.0pt",
font_family = "Gentium",
font_style = "normal",
max_lines = "0",
ellipsify = "false",
text_align = "left",
)
_styles.add(
"p",
color = "#ffffff",
margin_top = "0pt",
margin_bottom = "0pt",
)
_styles.add(
"caption",
color = "#669900",
margin_top = "%spt" % CAPTION_OFFSET,
margin_bottom = "0pt",
)
_styles.add(
"command",
color = "#669900",
)
# The tag aliases for primary message XML.
_tagAliases = xmltextlayout.XmlMarkupTagAliases()
_tagAliases.add( "p", baseElement = "block" )
_tagAliases.add( "caption", baseElement = "block" )
_tagAliases.add( "command", baseElement = "inline" )
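# Illustrative example of the markup these styles and tag aliases accept:
#   <document>
#     <p>Opened <command>calculator</command></p>
#     <caption>from the command line</caption>
#   </document>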
def layoutMessageXml( xmlMarkup, width, size, height, ellipsify="false",
raiseLayoutExceptions=False ):
"""
Lays out the xmlMarkup in a block that is width wide.
if raiseLayoutExceptions is False, then this function will
suppress any exceptions raised when parsing xmlMarkup and replace
it with a message that tells the end-user that the message was
broken, providing the end-user with as much of the original
message as possible. If raiseLayoutExceptions is True, however,
any exceptions raised will be passed through to the caller.
"""
maxLines = int( height / (size*LINE_SPACING) )
_styles.update( "document",
width = "%fpt" % width,
line_height = "%spt" % int(size*LINE_SPACING),
max_lines = maxLines,
font_size = "%spt" % size,
ellipsify = ellipsify,
)
try:
document = xmltextlayout.xmlMarkupToDocument(
xmlMarkup,
_styles,
_tagAliases
)
except Exception as e:
if raiseLayoutExceptions:
raise
logging.warn( "Could not layout message text %s; got error %s"
% ( xmlMarkup, e ) )
document = xmltextlayout.xmlMarkupToDocument(
"<document><p>%s</p>%s</document>" %
( escape_xml( xmlMarkup.strip() ),
"<caption>from a broken message</caption>" ),
_styles,
_tagAliases
)
return document
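# A minimal call sketch (the real callers in PrimaryMsgWind derive width and
# height from the window size, so these module constants are only a stand-in):
#   doc = layoutMessageXml( xmlMarkup = "<document><p>Hello</p></document>",
#                           width = PRIM_MSG_WIDTH, size = PRIM_TEXT_SIZE,
#                           height = MAX_MSG_HEIGHT )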
def splitContent( messageXml ):
"""
Splits messageXml into two parts: main, and caption.
"""
capLocation = messageXml.find( "<caption>" )
if capLocation == -1:
return ( messageXml, None )
else:
return ( messageXml[:capLocation], messageXml[capLocation:] )
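# For example:
#   splitContent( "<p>Done</p><caption>from a command</caption>" )
# returns ( "<p>Done</p>", "<caption>from a command</caption>" ); input with no
# <caption> tag comes back as ( messageXml, None ).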
|
|
# Copyright 2015-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources.elb import ELB, SetSslListenerPolicy
class ELBTagTest(BaseTest):
def test_elb_tag_and_remove(self):
self.patch(ELB, "executor_factory", MainThreadExecutor)
session_factory = self.replay_flight_data("test_elb_tag_and_remove")
client = session_factory().client("elb")
policy = self.load_policy(
{
"name": "elb-tag",
"resource": "elb",
"filters": [{"LoadBalancerName": "CloudCustodian"}],
"actions": [{"type": "tag", "key": "xyz", "value": "abdef"}],
},
config={"account_id": "644160558196"},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
tags = client.describe_tags(
LoadBalancerNames=["CloudCustodian"])["TagDescriptions"][0]["Tags"]
tag_map = {t["Key"]: t["Value"] for t in tags}
self.assertTrue("xyz" in tag_map)
policy = self.load_policy(
{
"name": "elb-tag",
"resource": "elb",
"filters": [{"LoadBalancerName": "CloudCustodian"}],
"actions": [{"type": "remove-tag", "tags": ["xyz"]}],
},
config={"account_id": "644160558196"},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
tags = client.describe_tags(
LoadBalancerNames=["CloudCustodian"])["TagDescriptions"][0]["Tags"]
tag_map = {t["Key"]: t["Value"] for t in tags}
self.assertFalse("xyz" in tag_map)
def test_elb_tags(self):
self.patch(ELB, "executor_factory", MainThreadExecutor)
session_factory = self.replay_flight_data("test_elb_tags")
policy = self.load_policy(
{
"name": "elb-mark",
"resource": "elb",
"filters": [{"tag:Platform": "ubuntu"}],
},
config={"account_id": "644160558196"},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["LoadBalancerName"], "CloudCustodian")
def test_mark_and_match(self):
session_factory = self.replay_flight_data("test_elb_mark_and_match")
policy = self.load_policy(
{
"name": "elb-mark",
"resource": "elb",
"filters": [{"LoadBalancerName": "CloudCustodian"}],
"actions": [
{
"type": "mark-for-op",
"op": "delete",
"tag": "custodian_next",
"days": 1,
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
tags = session_factory().client("elb").describe_tags(
LoadBalancerNames=["CloudCustodian"])["TagDescriptions"][0]["Tags"]
tag_map = {t["Key"]: t["Value"] for t in tags}
self.assertTrue("custodian_next" in tag_map)
policy = self.load_policy(
{
"name": "elb-mark-filter",
"resource": "elb",
"filters": [
{"type": "marked-for-op", "tag": "custodian_next", "op": "delete"}
],
},
config={"account_id": "644160558196"},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
class ELBInstance(BaseTest):
def test_instance_filter(self):
session_factory = self.replay_flight_data("test_elb_instance_filter")
policy = self.load_policy(
{
"name": "elb-instance",
"resource": "elb",
"filters": [
{"type": "instance", "key": "ImageId", "value": "ami-40d28157"}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["LoadBalancerName"], "balanced")
class HealthCheckProtocolMismatchTest(BaseTest):
def test_healthcheck_protocol_mismatch(self):
session_factory = self.replay_flight_data("test_healthcheck_protocol_mismatch")
policy = self.load_policy(
{
"name": "healthcheck-protocol-mismatch",
"resource": "elb",
"filters": [{"type": "healthcheck-protocol-mismatch"}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 3)
# make sure we matched the right load balancers
elb_names = {elb["LoadBalancerName"] for elb in resources}
self.assertEqual(
elb_names,
{
"test-elb-no-listeners",
"test-elb-protocol-matches",
"test-elb-multiple-listeners",
},
)
class SSLPolicyTest(BaseTest):
def test_ssl_ciphers(self):
session_factory = self.replay_flight_data("test_ssl_ciphers")
policy = self.load_policy(
{
"name": "test-ssl-ciphers",
"resource": "elb",
"filters": [{"type": "ssl-policy", "blacklist": ["Protocol-SSLv2"]}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["LoadBalancerName"], "test-elb-invalid-policy")
def test_set_ssl_listener_policy_fail(self):
session_factory = self.replay_flight_data("test_set_ssl_listener")
self.patch(SetSslListenerPolicy, 'process_elb', lambda self, client, elb: elb.xyz)
policy = self.load_policy({
"name": "test-set-ssl-listerner",
"resource": "elb",
"filters": [{'LoadBalancerName': 'test-elb'}],
"actions": [{
"type": "set-ssl-listener-policy",
"name": "testpolicy",
"attributes": ["AES128-SHA256", "Protocol-TLSv1"]}]},
session_factory=session_factory)
self.assertRaises(AttributeError, policy.run)
def test_set_ssl_listener_policy(self):
session_factory = self.replay_flight_data("test_set_ssl_listener")
client = session_factory().client("elb")
policy = self.load_policy(
{
"name": "test-set-ssl-listerner",
"resource": "elb",
"filters": [
{
"type": "ssl-policy",
"whitelist": ["AES128-SHA256", "Protocol-TLSv1"],
},
{
"type": "value",
"key": "LoadBalancerName",
"value": "test-elb",
"op": "eq",
},
],
"actions": [
{
"type": "set-ssl-listener-policy",
"name": "testpolicy",
"attributes": ["AES128-SHA256", "Protocol-TLSv1"],
}
],
},
session_factory=session_factory,
)
policy.run()
response_pol = client.describe_load_balancers(LoadBalancerNames=["test-elb"])
response_ciphers = client.describe_load_balancer_policies(
LoadBalancerName="test-elb", PolicyNames=["testpolicy-1493768308000"]
)
curr_pol = response_pol["LoadBalancerDescriptions"][0][
"ListenerDescriptions"][0]["PolicyNames"]
curr_ciphers = []
for x in response_ciphers["PolicyDescriptions"][0][
"PolicyAttributeDescriptions"
]:
curr_ciphers.append({str(k): str(v) for k, v in x.items()})
active_ciphers = [
x["AttributeName"] for x in curr_ciphers if x["AttributeValue"] == "true"
]
self.assertEqual(
curr_pol,
[
"AWSConsole-LBCookieStickinessPolicy-test-elb-1493748038333",
"testpolicy-1493768308000",
],
)
self.assertEqual(active_ciphers, ["Protocol-TLSv1", "AES128-SHA256"])
def test_ssl_matching(self):
session_factory = self.replay_flight_data("test_ssl_ciphers")
policy = self.load_policy(
{
"name": "test-ssl-matching",
"resource": "elb",
"filters": [
{
"type": "ssl-policy",
"matching": "^Protocol-",
"whitelist": [
"Protocol-TLSv1", "Protocol-TLSv1.1", "Protocol-TLSv1.2"
],
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["LoadBalancerName"], "test-elb-invalid-policy")
def test_filter_validation_no_blacklist(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "test-ssl-ciphers",
"resource": "elb",
"filters": [{"type": "ssl-policy"}],
},
session_factory=None,
validate=False,
)
def test_filter_validation_blacklist_not_iterable(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "test-ssl-ciphers",
"resource": "elb",
"filters": [{"type": "ssl-policy", "blacklist": "single-value"}],
},
session_factory=None,
validate=False,
)
class TestDefaultVpc(BaseTest):
def test_elb_default_vpc(self):
session_factory = self.replay_flight_data("test_elb_default_vpc")
p = self.load_policy(
{
"name": "elb-default-filters",
"resource": "elb",
"filters": [{"type": "default-vpc"}],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["LoadBalancerName"], "test-load-balancer")
class TestModifyVpcSecurityGroupsAction(BaseTest):
def test_elb_remove_security_groups(self):
# Test conditions:
# - running ELB in default VPC
# - security group named TEST-PROD-ONLY-SG exists in VPC and is
# attached to test ELB
session_factory = self.replay_flight_data("test_elb_remove_security_groups")
client = session_factory().client("ec2")
default_sg_id = client.describe_security_groups(
GroupNames=["default"])["SecurityGroups"][0]["GroupId"]
p = self.load_policy(
{
"name": "elb-modify-security-groups-filter",
"resource": "elb",
"filters": [
{
"type": "security-group",
"key": "GroupName",
"value": "(.*PROD-ONLY.*)",
"op": "regex",
}
],
"actions": [
{
"type": "modify-security-groups",
"remove": "matched",
"isolation-group": default_sg_id,
}
],
},
session_factory=session_factory,
)
resources = p.run()
clean_resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["LoadBalancerName"], "test-load-balancer")
self.assertEqual(len(clean_resources), 0)
def test_elb_add_security_group(self):
# Test conditions:
# - running one ELB with 'default' VPC security group attached
# - security group named TEST-PROD-ONLY-SG exists in VPC and is not
# attached to ELB
session_factory = self.replay_flight_data("test_elb_add_security_group")
policy = self.load_policy(
{
"name": "add-sg-to-prod-elb",
"resource": "elb",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"},
{
"type": "value",
"key": "LoadBalancerName",
"value": "test-load-balancer",
},
],
"actions": [{"type": "modify-security-groups", "add": "sg-411b413c"}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources[0]["SecurityGroups"]), 1)
policy.validate()
after_resources = policy.run()
self.assertEqual(len(after_resources[0]["SecurityGroups"]), 2)
def test_elb_add_security_groups(self):
# Test conditions:
# - running one ELB with 'default' VPC security group attached
# - security groups named TEST-PROD-ONLY-SG, TEST-SG1, and TEST-SG2
# exist in VPC - not attached to ELB
session_factory = self.replay_flight_data("test_elb_add_security_groups")
policy = self.load_policy(
{
"name": "add-sgs-to-prod-elb",
"resource": "elb",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"},
{
"type": "value",
"key": "LoadBalancerName",
"value": "test-load-balancer",
},
],
"actions": [
{
"type": "modify-security-groups",
"add": ["sg-411b413c", "sg-8a4b64f7", "sg-5d4a6520"],
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources[0]["SecurityGroups"]), 1)
policy.validate()
after_resources = policy.run()
self.assertEqual(len(after_resources[0]["SecurityGroups"]), 4)
def test_elb_remove_all_security_groups(self):
# Test conditions:
# - running one ELB with 'default' and 'TEST-PROD-ONLY-SG' VPC
# security groups attached
session_factory = self.replay_flight_data("test_elb_remove_all_security_groups")
client = session_factory().client("ec2")
default_sg_id = client.describe_security_groups(
GroupNames=["default"])["SecurityGroups"][0]["GroupId"]
policy = self.load_policy(
{
"name": "add-sg-to-prod-elb",
"resource": "elb",
"filters": [
{
"type": "value",
"key": "LoadBalancerName",
"value": "test-load-balancer",
}
],
"actions": [
{
"type": "modify-security-groups",
"remove": "all",
"isolation-group": default_sg_id,
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources[0]["SecurityGroups"]), 2)
policy.validate()
after_resources = policy.run()
self.assertEqual(len(after_resources[0]["SecurityGroups"]), 1)
# Check that it is indeed the isolation group on the ELB
self.assertEqual(after_resources[0]["SecurityGroups"][0], default_sg_id)
class TestElbLogging(BaseTest):
def test_enable_s3_logging(self):
session_factory = self.replay_flight_data("test_elb_enable_s3_logging")
policy = self.load_policy(
{
"name": "test-enable-s3-logging",
"resource": "elb",
"filters": [
{"type": "value", "key": "LoadBalancerName", "value": "elb1"}
],
"actions": [
{
"type": "enable-s3-logging",
"bucket": "elbv2logtest",
"prefix": "elblogs",
"emit_interval": 5,
}
],
},
session_factory=session_factory,
)
resources = policy.run()
client = session_factory().client("elb")
for elb in resources:
elb_name = elb["LoadBalancerName"]
results = client.describe_load_balancer_attributes(
LoadBalancerName=elb_name
)
elb["Attributes"] = results["LoadBalancerAttributes"]
self.assertEqual(resources[0]["Attributes"]["AccessLog"]["EmitInterval"], 5)
self.assertEqual(
resources[0]["Attributes"]["AccessLog"]["S3BucketName"], "elbv2logtest"
)
self.assertEqual(
resources[0]["Attributes"]["AccessLog"]["S3BucketPrefix"], "elblogs"
)
self.assertTrue(resources[0]["Attributes"]["AccessLog"]["Enabled"])
def test_disable_s3_logging(self):
session_factory = self.replay_flight_data("test_elb_disable_s3_logging")
policy = self.load_policy(
{
"name": "test-disable-s3-logging",
"resource": "elb",
"filters": [
{"type": "value", "key": "LoadBalancerName", "value": "elb1"}
],
"actions": [{"type": "disable-s3-logging"}],
},
session_factory=session_factory,
)
resources = policy.run()
client = session_factory().client("elb")
for elb in resources:
elb_name = elb["LoadBalancerName"]
results = client.describe_load_balancer_attributes(
LoadBalancerName=elb_name
)
elb["Attributes"] = results["LoadBalancerAttributes"]
self.assertFalse(resources[0]["Attributes"]["AccessLog"]["Enabled"])
class TestElbIsLoggingFilter(BaseTest):
""" replicate
- name: elb-is-logging-to-bucket-test
resource: elb
filters:
- type: is-logging
bucket: elbv2logtest
"""
def test_is_logging_to_bucket(self):
session_factory = self.replay_flight_data("test_elb_is_logging_filter")
policy = self.load_policy(
{
"name": "elb-is-logging-to-bucket-test",
"resource": "elb",
"filters": [{"type": "is-logging", "bucket": "elbv2logtest"}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertGreater(
len(resources), 0, "Test should find elbs logging " "to elbv2logtest"
)
class TestElbIsNotLoggingFilter(BaseTest):
""" replicate
- name: elb-is-not-logging-to-bucket-test
resource: elb
filters:
- type: is-not-logging
bucket: otherbucket
"""
def test_is_logging_to_bucket(self):
session_factory = self.replay_flight_data("test_elb_is_logging_filter")
policy = self.load_policy(
{
"name": "elb-is-not-logging-to-bucket-test",
"resource": "elb",
"filters": [{"type": "is-not-logging", "bucket": "otherbucket"}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertGreater(
len(resources), 0, "Should find elb not logging " "to otherbucket"
)
class TestElbAttributeFilter(BaseTest):
def test_is_connection_draining(self):
""" replicate
- name: elb-is-connection-draining-test
resource: elb
filters:
- type: attributes
key: ConnectionDraining.Enabled
value: true
op: eq
"""
session_factory = self.replay_flight_data("test_elb_attribute_filter")
policy = self.load_policy(
{
"name": "elb-is-connection-draining-test",
"resource": "elb",
"filters": [
{
"type": "attributes",
"key": "ConnectionDraining.Enabled",
"value": True,
"op": "eq"
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(
len(resources), 1, "Test should find one elb connection draining"
)
self.assertEqual(
resources[0]['Attributes']['ConnectionDraining']['Enabled'], True
)
def test_is_not_connection_draining(self):
""" replicate
- name: elb-is-not-connection-draining-test
resource: elb
filters:
- type: attributes
key: ConnectionDraining.Enabled
value: true
op: eq
"""
session_factory = self.replay_flight_data("test_elb_attribute_filter")
policy = self.load_policy(
{
"name": "elb-is-not-connection-draining-test",
"resource": "elb",
"filters": [
{
"type": "attributes",
"key": "ConnectionDraining.Enabled",
"value": False,
"op": "eq"
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(
len(resources), 0, "Test should find no elbs without connection draining "
)
def test_is_cross_zone_load_balancing(self):
""" replicate
- name: elb-is-cross-zone-load-balancing-test
resource: elb
filters:
- type: attributes
key: CrossZoneLoadBalancing.Enabled
value: true
op: eq
"""
session_factory = self.replay_flight_data("test_elb_attribute_filter")
policy = self.load_policy(
{
"name": "elb-is-cross-zone-load-balancing-test",
"resource": "elb",
"filters": [
{
"type": "attributes",
"key": "CrossZoneLoadBalancing.Enabled",
"value": True,
"op": "eq"
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(
len(resources), 1, "Test should find one elb cross zone load balancing"
)
self.assertEqual(
resources[0]['Attributes']['CrossZoneLoadBalancing']['Enabled'], True
)
def test_is_not_cross_zone_load_balancing(self):
""" replicate
- name: elb-is-not-cross-zone-load-balancing
resource: elb
filters:
- type: attributes
key: CrossZoneLoadBalancing.Enabled
value: false
op: eq
"""
session_factory = self.replay_flight_data("test_elb_attribute_filter")
policy = self.load_policy(
{
"name": "elb-is-not-cross-zone-load-balancing-test",
"resource": "elb",
"filters": [
{
"type": "attributes",
"key": "CrossZoneLoadBalancing.Enabled",
"value": False,
"op": "eq"
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(
len(resources), 0, "Test should find no elbs not cross zone load balancing"
)
def test_idle_time_greater_than_30(self):
""" replicate
- name: elb-idle-timeout-test
resource: elb
filters:
- type: attributes
key: ConnectionSettings.IdleTimeout
value: 30
op: gt
"""
session_factory = self.replay_flight_data("test_elb_attribute_filter")
policy = self.load_policy(
{
"name": "elb-idle-timeout-greater-than-30",
"resource": "elb",
"filters": [
{
"type": "attributes",
"key": "ConnectionSettings.IdleTimeout",
"value": 30,
"op": "gt"
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(
len(resources), 1, "Test should find 1 elb with idle timeout > 30 seconds"
)
self.assertGreater(
resources[0]['Attributes']['ConnectionSettings']['IdleTimeout'], 30
)
def test_idle_time_less_than_30(self):
""" replicate
- name: elb-idle-timeout-test
resource: elb
filters:
- type: attributes
key: ConnectionSettings.IdleTimeout
value: 30
op: lt
"""
session_factory = self.replay_flight_data("test_elb_attribute_filter")
policy = self.load_policy(
{
"name": "elb-idle-timeout-less-than-30",
"resource": "elb",
"filters": [
{
"type": "attributes",
"key": "ConnectionSettings.IdleTimeout",
"value": 30,
"op": "lt"
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(
len(resources), 0, "Test should find 0 elbs with idle timeout < 30 seconds"
)
|
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for master slave connections."""
import datetime
import os
import sys
import time
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo import ReadPreference
from pymongo.errors import ConnectionFailure, InvalidName
from pymongo.errors import CollectionInvalid, OperationFailure
from pymongo.errors import AutoReconnect
from pymongo.database import Database
from pymongo.connection import Connection
from pymongo.collection import Collection
from pymongo.master_slave_connection import MasterSlaveConnection
class TestMasterSlaveConnection(unittest.TestCase):
def setUp(self):
host = os.environ.get("DB_IP", "localhost")
self.master = Connection(host, int(os.environ.get("DB_PORT", 27017)))
self.slaves = []
try:
self.slaves.append(Connection(os.environ.get("DB_IP2", host),
int(os.environ.get("DB_PORT2", 27018)),
read_preference=ReadPreference.SECONDARY))
except ConnectionFailure:
pass
try:
self.slaves.append(Connection(os.environ.get("DB_IP3", host),
int(os.environ.get("DB_PORT3", 27019)),
read_preference=ReadPreference.SECONDARY))
except ConnectionFailure:
pass
if not self.slaves:
raise SkipTest()
self.connection = MasterSlaveConnection(self.master, self.slaves)
self.db = self.connection.pymongo_test
def test_types(self):
self.assertRaises(TypeError, MasterSlaveConnection, 1)
self.assertRaises(TypeError, MasterSlaveConnection, self.master, 1)
self.assertRaises(TypeError, MasterSlaveConnection, self.master, [1])
def test_repr(self):
self.assertEqual(repr(self.connection),
"MasterSlaveConnection(%r, %r)" %
(self.master, self.slaves))
def test_disconnect(self):
class Connection(object):
def __init__(self):
self._disconnects = 0
def disconnect(self):
self._disconnects += 1
self.connection._MasterSlaveConnection__master = Connection()
self.connection._MasterSlaveConnection__slaves = [Connection(),
Connection()]
self.connection.disconnect()
self.assertEqual(1,
self.connection._MasterSlaveConnection__master._disconnects)
self.assertEqual(1,
self.connection._MasterSlaveConnection__slaves[0]._disconnects)
self.assertEqual(1,
self.connection._MasterSlaveConnection__slaves[1]._disconnects)
def test_continue_until_slave_works(self):
class Slave(object):
calls = 0
def __init__(self, fail):
self._fail = fail
def _send_message_with_response(self, *args, **kwargs):
Slave.calls += 1
if self._fail:
raise AutoReconnect()
return 'sent'
class NotRandomList(object):
last_idx = -1
def __init__(self):
self._items = [Slave(True), Slave(True),
Slave(False), Slave(True)]
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
NotRandomList.last_idx = idx
return self._items.pop(0)
self.connection._MasterSlaveConnection__slaves = NotRandomList()
response = self.connection._send_message_with_response('message')
self.assertEqual((NotRandomList.last_idx, 'sent'), response)
self.assertNotEqual(-1, NotRandomList.last_idx)
self.assertEqual(3, Slave.calls)
def test_raise_autoreconnect_if_all_slaves_fail(self):
class Slave(object):
calls = 0
def __init__(self, fail):
self._fail = fail
def _send_message_with_response(self, *args, **kwargs):
Slave.calls += 1
if self._fail:
raise AutoReconnect()
return 'sent'
class NotRandomList(object):
def __init__(self):
self._items = [Slave(True), Slave(True),
Slave(True), Slave(True)]
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
return self._items.pop(0)
self.connection._MasterSlaveConnection__slaves = NotRandomList()
self.assertRaises(AutoReconnect,
self.connection._send_message_with_response, 'message')
self.assertEqual(4, Slave.calls)
def test_get_db(self):
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, self.connection, "")
self.assertRaises(InvalidName, make_db, self.connection, "te$t")
self.assertRaises(InvalidName, make_db, self.connection, "te.t")
self.assertRaises(InvalidName, make_db, self.connection, "te\\t")
self.assertRaises(InvalidName, make_db, self.connection, "te/t")
self.assertRaises(InvalidName, make_db, self.connection, "te st")
self.assertTrue(isinstance(self.connection.test, Database))
self.assertEqual(self.connection.test, self.connection["test"])
self.assertEqual(self.connection.test, Database(self.connection,
"test"))
def test_database_names(self):
self.connection.pymongo_test.test.save({"dummy": u"object"})
self.connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = self.connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
def test_drop_database(self):
self.assertRaises(TypeError, self.connection.drop_database, 5)
self.assertRaises(TypeError, self.connection.drop_database, None)
raise SkipTest("This test often fails due to SERVER-2329")
self.connection.pymongo_test.test.save({"dummy": u"object"}, safe=True)
dbs = self.connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.connection.drop_database("pymongo_test")
dbs = self.connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
self.connection.pymongo_test.test.save({"dummy": u"object"})
dbs = self.connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.connection.drop_database(self.connection.pymongo_test)
dbs = self.connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
def test_iteration(self):
def iterate():
[a for a in self.connection]
self.assertRaises(TypeError, iterate)
def test_insert_find_one_in_request(self):
count = 0
for i in range(100):
self.connection.start_request()
self.db.test.remove({})
self.db.test.insert({"x": i})
try:
if i != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.connection.end_request()
self.assertFalse(count)
# This was failing because commands were being sent to the slaves
def test_create_collection(self):
self.connection.pymongo_test.test.drop()
collection = self.db.create_collection('test')
self.assertTrue(isinstance(collection, Collection))
self.assertRaises(CollectionInvalid, self.db.create_collection, 'test')
# Believe this was failing for the same reason...
def test_unique_index(self):
self.connection.pymongo_test.test.drop()
self.db.test.create_index('username', unique=True)
self.db.test.save({'username': 'mike'}, safe=True)
self.assertRaises(OperationFailure,
self.db.test.save, {'username': 'mike'}, safe=True)
# NOTE this test is non-deterministic, but I expect
# some failures unless the db is pulling instantaneously...
def test_insert_find_one_with_slaves(self):
count = 0
for i in range(100):
self.db.test.remove({})
self.db.test.insert({"x": i})
try:
if i != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.assertTrue(count)
# NOTE this test is non-deterministic, but hopefully we pause long enough
# for the slaves to pull...
def test_insert_find_one_with_pause(self):
count = 0
self.db.test.remove({})
self.db.test.insert({"x": 5586})
time.sleep(11)
for _ in range(10):
try:
if 5586 != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.assertFalse(count)
def test_kill_cursor_explicit(self):
c = self.connection
c.slave_okay = True
db = c.pymongo_test
db.drop_collection("test")
test = db.test
test.insert(
[{"i": i} for i in range(20)], safe=True, w=1 + len(self.slaves))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
self.assertNotEqual(
cursor._Cursor__connection_id,
-1,
"Expected cursor connected to a slave, not master")
cursor.next()
self.assertNotEqual(0, cursor.cursor_id)
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_base_object(self):
c = self.connection
self.assertFalse(c.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertFalse(c.safe)
self.assertEqual({}, c.get_lasterror_options())
db = c.test
self.assertFalse(db.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertFalse(db.safe)
self.assertEqual({}, db.get_lasterror_options())
coll = db.test
coll.drop()
self.assertFalse(coll.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertFalse(coll.safe)
self.assertEqual({}, coll.get_lasterror_options())
cursor = coll.find()
self.assertFalse(cursor._Cursor__slave_okay)
self.assertTrue(bool(cursor._Cursor__read_preference))
c.safe = True
w = 1 + len(self.slaves)
        wtimeout = 10000  # Wait 10 seconds for replication to complete
c.set_lasterror_options(w=w, wtimeout=wtimeout)
self.assertFalse(c.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(c.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout}, c.get_lasterror_options())
db = c.test
self.assertFalse(db.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(db.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout}, db.get_lasterror_options())
coll = db.test
self.assertFalse(coll.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(coll.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout},
coll.get_lasterror_options())
cursor = coll.find()
self.assertFalse(cursor._Cursor__slave_okay)
self.assertTrue(bool(cursor._Cursor__read_preference))
coll.insert({'foo': 'bar'})
self.assertEqual(1, coll.find({'foo': 'bar'}).count())
self.assertTrue(coll.find({'foo': 'bar'}))
coll.remove({'foo': 'bar'})
self.assertEqual(0, coll.find({'foo': 'bar'}).count())
# Set self.connection back to defaults
c.safe = False
c.unset_lasterror_options()
self.assertFalse(self.connection.slave_okay)
self.assertTrue(bool(self.connection.read_preference))
self.assertFalse(self.connection.safe)
self.assertEqual({}, self.connection.get_lasterror_options())
def test_document_class(self):
c = MasterSlaveConnection(self.master, self.slaves)
db = c.pymongo_test
w = 1 + len(self.slaves)
db.test.insert({"x": 1}, safe=True, w=w)
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c = MasterSlaveConnection(self.master, self.slaves, document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
def test_tz_aware(self):
dt = datetime.datetime.utcnow()
conn = MasterSlaveConnection(self.master, self.slaves)
self.assertEqual(False, conn.tz_aware)
db = conn.pymongo_test
w = 1 + len(self.slaves)
db.tztest.insert({'dt': dt}, safe=True, w=w)
self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo)
conn = MasterSlaveConnection(self.master, self.slaves, tz_aware=True)
self.assertEqual(True, conn.tz_aware)
db = conn.pymongo_test
db.tztest.insert({'dt': dt}, safe=True, w=w)
self.assertEqual(utc, db.tztest.find_one()['dt'].tzinfo)
conn = MasterSlaveConnection(self.master, self.slaves, tz_aware=False)
self.assertEqual(False, conn.tz_aware)
db = conn.pymongo_test
db.tztest.insert({'dt': dt}, safe=True, w=w)
self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo)
if __name__ == "__main__":
unittest.main()
|
|
import time
import logging
logger = logging.getLogger(__name__)
import synapse.glob as s_glob
import synapse.synasync as s_async
import synapse.common as s_common
import synapse.eventbus as s_eventbus
import synapse.telepath as s_telepath
import synapse.lib.tags as s_tags
import synapse.lib.scope as s_scope
import synapse.lib.reflect as s_reflect
import synapse.lib.thishost as s_thishost
def openurl(url, **opts):
'''
Open a remote service bus and return a SvcProxy class.
Example:
        svcprox = openurl('tcp://svcbus.com/mybus')
'''
svcbus = s_telepath.openurl(url, **opts)
return SvcProxy(svcbus)
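# Hedged usage sketch (not part of the original module): one way a client
# might combine openurl() with the tag-proxy helpers defined further below.
# The URL and the getFooThing() method are illustrative assumptions, not
# real endpoints.
def _example_openurl_usage():  # pragma: no cover
    svcprox = openurl('tcp://svcbus.example.com/mybus')
    try:
        # Fan a getFooThing() call out to every service tagged 'foos.bars'
        # and collect the per-service return values.
        foosbars = svcprox.getTagProxy('foos.bars')
        return list(foosbars.getFooThing())
    finally:
        svcprox.fini()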
class SvcBus(s_eventbus.EventBus):
def __init__(self):
s_eventbus.EventBus.__init__(self)
self.bytag = s_tags.ByTag()
self.services = {}
self.on('syn:svc:fini', self._onSynSvcFini)
def _onSynSvcFini(self, mesg):
svcfo = mesg[1].get('svcfo')
iden = svcfo[0]
self.bytag.pop(iden)
def iAmSynSvc(self, iden, props):
'''
API used by synapse service to register with the bus.
Example:
            sbus.iAmSynSvc(iden, {'name': 'syn.blah', 'foo': 'bar', 'baz': 10})
'''
props['iden'] = iden
svcfo = (iden, props)
sock = s_scope.get('sock')
if sock is not None:
def onfini():
# MULTIPLEXOR - don't block
def _onfini():
oldsvc = self.services.pop(iden, None)
self.bytag.pop(iden)
self.fire('syn:svc:fini', svcfo=oldsvc)
s_glob.pool.call(_onfini)
sock.onfini(onfini)
self.services[iden] = svcfo
tags = props.get('tags', ())
self.bytag.put(iden, tags)
self.fire('syn:svc:init', svcfo=svcfo)
def iAmAlive(self, iden):
'''
"heartbeat" API for services.
Example:
sbus.iAmAlive(iden)
Notes:
This API is generally called by a scheduled loop
within the service object.
'''
svcfo = self.services.get(iden)
if svcfo is not None:
svcfo[1]['checkin'] = int(time.time())
def getSynSvcs(self):
'''
Retrieve a list of the services on the service bus.
Example:
for name,info in sbus.getSynSvcs():
dostuff(name,info)
'''
return list(self.services.values())
def getSynSvcsByTag(self, tag):
'''
Return a list of synapse services by hierarchical tag.
Args:
tag (str): Tag to get services for.
Examples:
Get all the services with the foo.bar tag and dostuff() with the data::
for name,props in sbus.getSynSvcsByTag('foo.bar'):
dostuff(name,props)
Returns:
list: A list of service tufos.
'''
return [self.services.get(i) for i in self.bytag.get(tag)]
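# Hedged sketch (assumption, not taken from the original source): exercising a
# SvcBus in-process, without telepath, to show the register/query flow that
# iAmSynSvc() and getSynSvcsByTag() implement.
def _example_svcbus_local():  # pragma: no cover
    sbus = SvcBus()
    iden = s_common.guid()
    sbus.iAmSynSvc(iden, {'name': 'syn.woot', 'tags': ('foos.bars',)})
    # Each svcfo is an (iden, props) tuple.
    for svcfo in sbus.getSynSvcsByTag('foos.bars'):
        print(svcfo[1].get('name'))
    sbus.fini()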
class SvcProxy(s_eventbus.EventBus):
'''
A client-side helper for service dispatches.
Mostly exists to wrap functionality for calling multiple
services by tag.
'''
def __init__(self, sbus, timeout=None):
s_eventbus.EventBus.__init__(self)
self.byiden = {}
self.byname = {}
self.bytag = s_tags.ByTag()
self.idenprox = {}
self.nameprox = {}
self.tagprox = {}
self.sbus = sbus
self.timeout = timeout
self.onfini(self.sbus.fini)
# FIXME set a reconnect handler for sbus
self.sbus.on('syn:svc:init', self._onSynSvcInit)
self.sbus.on('syn:svc:init', self.dist)
self.sbus.on('syn:svc:fini', self._onSynSvcFini)
self.sbus.on('syn:svc:fini', self.dist)
        for svcfo in sbus.getSynSvcs():
            self._addSvcTufo(svcfo)
def _onSynSvcInit(self, mesg):
svcfo = mesg[1].get('svcfo')
if svcfo is None:
return
self._addSvcTufo(svcfo)
def _addSvcTufo(self, svcfo):
iden = svcfo[0]
tags = svcfo[1].get('tags', ())
name = svcfo[1].get('name', iden)
self.byiden[iden] = svcfo
self.byname[name] = svcfo
self.idenprox[iden] = IdenProxy(self, svcfo)
self.bytag.put(iden, tags)
def _onSynSvcFini(self, mesg):
svcfo = mesg[1].get('svcfo')
iden = svcfo[0]
name = svcfo[1].get('name', iden)
self.bytag.pop(iden)
self.idenprox.pop(iden, None)
self.byname.pop(name, None)
self.byiden.pop(iden, None)
def setSynSvcTimeout(self, timeout):
self.timeout = timeout
def getSynSvc(self, iden):
'''
Return the tufo for the specified svc iden ( or None ).
Example:
svcfo = svcprox.getSynSvc(iden)
            if svcfo is not None:
dostuff(svcfo)
'''
return self.byiden.get(iden)
def getSynSvcs(self):
'''
Return the current list of known service tufos.
Example:
for svcfo in svcprox.getSynSvcs():
dostuff(svcfo)
'''
return list(self.byiden.values())
def getSynSvcsByTag(self, tag):
'''
Return a list of synapse services by hierarchical tag.
Args:
tag (str): Tag to get services for.
Examples:
Get all the services with the foo.bar tag and dostuff() with the data::
for svcfo in svcprox.getSynSvcsByTag('foo.bar'):
                    dostuff(svcfo)
Returns:
list: A list of service tufos.
'''
return [self.byiden.get(i) for i in self.bytag.get(tag)]
def __getitem__(self, name):
'''
Syntax sugar to allow svcprox['foo'].getFooByBar().
'''
return self.getTagProxy(name)
def callByIden(self, iden, func, *args, **kwargs):
'''
Call a specific object on the service bus by iden.
Example:
ret = svcprox.callByIden(iden,'getFooByBar',bar)
'''
svcfo = self.byiden.get(iden)
if svcfo is None:
raise s_common.NoSuchObj(iden)
dyntask = (func, args, kwargs)
job = self.sbus.callx(iden, dyntask)
self.sbus._waitTeleJob(job, timeout=self.timeout)
return s_async.jobret(job)
def getSynSvcByName(self, name):
return self.byname.get(name)
def callByName(self, name, dyntask, timeout=None):
'''
Call a specific object on the service bus by name.
Example:
# dyntask tuple is (name,args,kwargs)
dyntask = gentask('getFooByBar',bar)
ret = svcprox.callByName('foo0', dyntask)
'''
if timeout is None:
timeout = self.timeout
svcfo = self.getSynSvcByName(name)
if svcfo is None:
raise s_common.NoSuchObj(name)
job = self.sbus.callx(svcfo[0], dyntask)
self.sbus._waitTeleJob(job, timeout=timeout)
return s_async.jobret(job)
def getNameProxy(self, name):
'''
Construct and return a SvcNameProxy to simplify callByName use.
Example:
foosbars = svcprox.getNameProxy('foos_bars')
valu = foosbars.getBlahThing()
dostuff(valu)
'''
prox = self.nameprox.get(name)
if prox is None:
prox = SvcNameProxy(self, name)
self.nameprox[name] = prox
return prox
def callByTag(self, tag, dyntask, timeout=None):
'''
Call a method on all services with the given tag.
Args:
tag (str): Tag to call objects by.
            dyntask ((str, tuple, dict)): A tuple containing the function name, *args and **kwargs for the task.
timeout (int): Timeout to wait for the job to complete for, in seconds.
Examples:
Call getFooThing on all objects with the 'foo.bar' tag and dostuff() on the results::
dyntask = gentask('getFooThing')
for svcfo,retval in svcprox.callByTag('foo.bar',dyntask):
dostuff(svcfo,retval)
Yields:
tuple: Tuple containing svcfo and job results.
'''
jobs = []
if timeout is None:
timeout = self.timeout
for iden in self.bytag.get(tag):
job = self.sbus.callx(iden, dyntask)
jobs.append((iden, job))
for iden, job in jobs:
self.sbus._waitTeleJob(job, timeout=timeout)
svcfo = self.byiden.get(iden)
try:
yield svcfo, s_async.jobret(job)
except Exception as e:
logger.warning('callByTag (%s): %s() on %s %s', tag, dyntask[0], iden, e)
def getTagProxy(self, tag):
'''
Construct and return a SvcTagProxy to simplify callByTag use.
Example:
foosbars = svcprox.getTagProxy('foos.bars')
for valu in foosbars.getBlahThing():
dostuff(valu)
'''
prox = self.tagprox.get(tag)
if prox is None:
prox = SvcTagProxy(self, tag)
self.tagprox[tag] = prox
return prox
def runSynSvc(self, name, item, tags=(), **props):
'''
Publish an object to the service bus with the given tags.
Example:
foo = Foo()
svcprox.runSynSvc('foo0', foo, tags=('foos.foo0',))
'''
return runSynSvc(name, item, self.sbus, tags=tags, **props)
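# Hedged sketch (assumption, not from the original module): two equivalent
# ways to invoke a method on a named service through a SvcProxy -- an explicit
# dyntask tuple via callByName(), or the attribute-style SvcNameProxy defined
# just below.  The service name 'foo0' and method getFooByBar() are
# illustrative.
def _example_call_by_name(svcprox):  # pragma: no cover
    # dyntask tuples are (methname, args, kwargs)
    dyntask = ('getFooByBar', ('bar',), {})
    ret = svcprox.callByName('foo0', dyntask)

    foo0 = svcprox.getNameProxy('foo0')
    assert foo0.getFooByBar('bar') == ret
    return ret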
class SvcNameProxy:
'''
Constructed by SvcProxy for simplifying callByName use.
'''
def __init__(self, svcprox, name):
self.name = name
self.svcprox = svcprox
def _callSvcApi(self, name, *args, **kwargs):
dyntask = (name, args, kwargs)
return self.svcprox.callByName(self.name, dyntask)
def __getattr__(self, name):
item = SvcNameMeth(self, name)
setattr(self, name, item)
return item
class SvcNameMeth:
def __init__(self, nameprox, name):
self.name = name
self.nameprox = nameprox
def __call__(self, *args, **kwargs):
return self.nameprox._callSvcApi(self.name, *args, **kwargs)
class SvcTagProxy:
'''
Constructed by SvcProxy for simplifying callByTag use.
'''
def __init__(self, svcprox, tag):
self.tag = tag
self.svcprox = svcprox
def _callSvcApi(self, name, *args, **kwargs):
dyntask = (name, args, kwargs)
return self.svcprox.callByTag(self.tag, dyntask)
def __getattr__(self, name):
item = SvcTagMeth(self, name)
setattr(self, name, item)
return item
class SvcTagMeth:
def __init__(self, tagprox, name):
self.name = name
self.tagprox = tagprox
def __call__(self, *args, **kwargs):
for name, ret in self.tagprox._callSvcApi(self.name, *args, **kwargs):
yield ret
# FIXME UNIFY WITH ABOVE WHEN BACKWARD BREAK IS OK
class SvcBase:
def __init__(self, svcprox):
self.svcprox = svcprox
def _callSvcMeth(self, name, *args, **kwargs): # pragma: no cover
        raise s_common.NoSuchImpl(name='_callSvcMeth')
def __getattr__(self, name):
item = SvcMeth(self, name)
setattr(self, name, item)
return item
class SvcMeth:
def __init__(self, svcbase, name):
self.name = name
self.svcbase = svcbase
def __call__(self, *args, **kwargs):
return self.svcbase._callSvcMeth(self.name, *args, **kwargs)
class IdenProxy(SvcBase):
def __init__(self, svcprox, svcfo):
self.svcfo = svcfo
SvcBase.__init__(self, svcprox)
def _callSvcMeth(self, name, *args, **kwargs):
return self.svcprox.callByIden(self.svcfo[0], name, *args, **kwargs)
def runSynSvc(name, item, sbus, tags=(), **props):
'''
Add an object as a synapse service.
Args:
name (str): Name of the service.
item (object): Callable service object.
sbus (s_telepath.Proxy): Telepath Proxy object pointing to a ServiceBus.
        tags (tuple): Tags to assign to the service on the bus.
**props: Additional props to make available about the service.
Examples:
Share the woot object as a service named 'syn.woot'::
woot = Woot()
sbus = s_telepath.openurl('tcp://1.2.3.4:90/syn.svcbus')
runSynSvc('syn.woot', woot, sbus)
Returns:
str: The iden of the instance of the service on the ServiceBus.
'''
iden = s_common.guid()
sbus.push(iden, item)
sbus.push(name, item)
hostinfo = s_thishost.hostinfo
tags = list(tags)
names = s_reflect.getClsNames(item)
tags.extend(['class.%s' % n for n in names])
tags.append(name)
props['name'] = name
props['tags'] = tags
props['hostinfo'] = hostinfo
props['hostname'] = hostinfo.get('hostname')
def onTeleSock(mesg):
if not sbus.isfini:
sbus.iAmSynSvc(iden, props)
def svcHeartBeat():
if sbus.isfini:
return
sbus.call('iAmAlive', iden)
s_glob.sched.insec(30, svcHeartBeat)
svcHeartBeat()
sbus.on('tele:sock:init', onTeleSock)
sbus.iAmSynSvc(iden, props)
return iden
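# Hedged end-to-end sketch (assumption, not from the original module): publish
# an object on a telepath service bus with runSynSvc(), then resolve and call
# it from a client-side SvcProxy.  The URL and the Woot class are
# illustrative.
def _example_publish_and_resolve():  # pragma: no cover
    class Woot:
        def getFooThing(self):
            return 'foo'

    sbus = s_telepath.openurl('tcp://1.2.3.4:90/syn.svcbus')
    iden = runSynSvc('syn.woot', Woot(), sbus, tags=('foos.bars',))

    svcprox = openurl('tcp://1.2.3.4:90/syn.svcbus')
    woot = svcprox.getNameProxy('syn.woot')
    return iden, woot.getFooThing()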
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
_VERSION = '1.4.0'
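# Illustrative note (not part of the original file): for a pre-release version
# such as '1.4.0-rc1', the `_VERSION.replace('-', '')` call passed to setup()
# below yields '1.4.0rc1', which is the form pip accepts.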
REQUIRED_PACKAGES = [
'enum34 >= 1.1.6',
'numpy >= 1.12.1',
'six >= 1.10.0',
'protobuf >= 3.3.0',
'tensorflow-tensorboard >= 0.4.0rc1, < 0.5.0',
]
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3, need to install for python2
REQUIRED_PACKAGES.append('mock >= 2.0.0')
# remove tensorboard from tf-nightly packages
if 'tf_nightly' in project_name:
for package in REQUIRED_PACKAGES:
if 'tensorflow-tensorboard' in package:
REQUIRED_PACKAGES.remove(package)
break
# weakref.finalize was introduced in Python 3.4
if sys.version_info < (3, 4):
REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:main',
]
# pylint: enable=line-too-long
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib,
'tensorflow', 'include')
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
# Copy eigen code into tensorflow/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
if 'external/eigen_archive/' in install_dir:
extra_dir = install_dir.replace('external/eigen_archive', '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for path, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
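# Hedged illustration (not part of the original script): find_files() yields
# relative paths under the given root, which is how the `headers` and
# `matches` lists below are assembled.
def _example_find_files():  # pragma: no cover
  # e.g. ['tensorflow/core/framework/op.h', ...] when run from the package root
  return list(find_files('*.h', 'tensorflow/core'))[:5]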
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
matches += ['../' + x for x in find_files('*', '_solib_k8') if '.py' not in x]
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) +
list(find_files('*', 'third_party/eigen3')) +
list(find_files('*', 'external/eigen_archive')) +
list(find_files('*.h', 'external/nsync/public')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description='TensorFlow helps the tensors flow',
long_description='',
url='https://www.tensorflow.org/',
author='Google Inc.',
author_email='opensource@google.com',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',)
|
|
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer.links.model.vision.resnet import BuildingBlock
from chainercv.links.model.faster_rcnn.region_proposal_network \
import RegionProposalNetwork
from chainer_mask_rcnn import functions
from chainer_mask_rcnn.models.mask_rcnn_resnet import _copy_persistent_chain
from chainer_mask_rcnn.models.resnet_extractor import _convert_bn_to_affine
from chainer_mask_rcnn.models.resnet_extractor import ResNet101Extractor
from chainer_mask_rcnn.models.resnet_extractor import ResNet50Extractor
from .mask_rcnn import MaskRCNN
class MaskRCNNResNet(MaskRCNN):
feat_stride = 16
def __init__(self,
n_layers,
n_fg_class=None,
pretrained_model=None,
min_size=600,
max_size=1000,
mask_loss='softmax',
ratios=(0.5, 1, 2),
anchor_scales=(4, 8, 16, 32),
mean=(123.152, 115.903, 103.063),
res_initialW=None,
rpn_initialW=None,
loc_initialW=None,
score_initialW=None,
mask_initialW=None,
proposal_creator_params=dict(
min_size=0,
n_test_pre_nms=6000,
n_test_post_nms=1000,
),
pooling_func=functions.roi_align_2d,
rpn_dim=1024,
roi_size=7,
):
if loc_initialW is None:
loc_initialW = chainer.initializers.Normal(0.001)
if score_initialW is None:
score_initialW = chainer.initializers.Normal(0.01)
if mask_initialW is None:
mask_initialW = chainer.initializers.Normal(0.01)
if rpn_initialW is None:
rpn_initialW = chainer.initializers.Normal(0.01)
if res_initialW is None and pretrained_model:
res_initialW = chainer.initializers.constant.Zero()
if n_layers == 50:
extractor = ResNet50Extractor(
pretrained_model=None if pretrained_model else 'auto',
remove_layers=['res5', 'fc6'],
)
elif n_layers == 101:
extractor = ResNet101Extractor(
pretrained_model=None if pretrained_model else 'auto',
remove_layers=['res5', 'fc6'],
)
else:
            raise ValueError('unsupported n_layers: {}'.format(n_layers))
rpn = RegionProposalNetwork(
1024, rpn_dim,
ratios=ratios,
anchor_scales=anchor_scales,
feat_stride=self.feat_stride,
initialW=rpn_initialW,
proposal_creator_params=proposal_creator_params,
)
head = ResNetRoIHead(
n_layers=n_layers,
n_class=n_fg_class + 1,
roi_size=roi_size, spatial_scale=1. / self.feat_stride,
res_initialW=res_initialW,
loc_initialW=loc_initialW,
score_initialW=score_initialW,
mask_initialW=mask_initialW,
pooling_func=pooling_func,
mask_loss=mask_loss,
)
        if len(mean) != 3:
            raise ValueError('The mean must be a tuple of three RGB values.')
mean = np.asarray(mean, dtype=np.float32)[:, None, None]
super(MaskRCNNResNet, self).__init__(
extractor,
rpn,
head,
mean=mean,
min_size=min_size,
max_size=max_size,
mask_loss=mask_loss,
)
if pretrained_model:
chainer.serializers.load_npz(pretrained_model, self)
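# Hedged usage sketch (assumption, not from the original module): building the
# 50-layer variant for a hypothetical 20-class task and running inference on a
# single RGB image.  The weights file name is illustrative, and the output
# layout is whatever the MaskRCNN base class returns from predict().
def _example_build_mask_rcnn():  # pragma: no cover
    model = MaskRCNNResNet(
        n_layers=50,
        n_fg_class=20,
        pretrained_model='mask_rcnn_resnet50_coco.npz',
    )
    img = np.zeros((3, 480, 640), dtype=np.float32)  # CHW, RGB
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        results = model.predict([img])
    return results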
class ResNetRoIHead(chainer.Chain):
def __init__(self, n_layers, n_class, roi_size, spatial_scale,
pretrained_model='auto',
res_initialW=None, loc_initialW=None, score_initialW=None,
mask_initialW=None,
pooling_func=functions.roi_align_2d, mask_loss='softmax'):
# n_class includes the background
super(ResNetRoIHead, self).__init__()
with self.init_scope():
self.res5 = BuildingBlock(
3, 1024, 512, 2048, stride=roi_size // 7,
initialW=res_initialW)
self.cls_loc = L.Linear(2048, n_class * 4, initialW=loc_initialW)
self.score = L.Linear(2048, n_class, initialW=score_initialW)
self.mask_loss = mask_loss
# 7 x 7 x 2048 -> 14 x 14 x 256
self.deconv6 = L.Deconvolution2D(
2048, 256, 2, stride=2, initialW=mask_initialW
)
# 14 x 14 x 256
n_fg_class = n_class - 1
if self.mask_loss in ['softmax', 'softmax_x2']:
# -> 14 x 14 x (n_fg_class * 3) (bg vs. vis vs. inv)
self.mask = L.Convolution2D(
256, n_fg_class * 3, 1, initialW=mask_initialW
)
elif self.mask_loss == 'sigmoid_softmax':
# -> 14 x 14 x n_fg_class: vis
self.mask = L.Convolution2D(
256, n_fg_class, 1, initialW=mask_initialW
)
# -> 14 x 14 x (n_fg_class * 2): bg vs. inv
self.mask_bginv = L.Convolution2D(
256, n_fg_class * 2, 1, initialW=mask_initialW
)
elif self.mask_loss in ['sigmoid_sigmoid', 'sigmoid_sigmoid+']:
# -> 14 x 14 x n_fg_class: vis
self.mask = L.Convolution2D(
256, n_fg_class, 1, initialW=mask_initialW
)
# -> 14 x 14 x n_fg_class: vis + inv
self.mask_visinv = L.Convolution2D(
256, n_fg_class, 1, initialW=mask_initialW
)
elif self.mask_loss in [
'softmax_relook_softmax',
'softmax_relook_softmax+',
'softmax_relook_softmax+_res',
'softmax_relook_softmax_cls',
'softmax_relook_softmax+_cls',
'softmax_relook_softmax_tt',
'softmax_relook_softmax+_tt',
'softmax_relook_softmax+_tt2',
'softmax_relook_softmax_cls_tt',
'softmax_relook_softmax+_cls_tt',
'softmax_relook_softmax_bbox',
'softmax_relook_softmax+_bbox',
]:
self.mask = L.Convolution2D(
in_channels=256,
out_channels=n_fg_class * 3,
ksize=1,
initialW=mask_initialW,
)
if '_cls' in self.mask_loss:
self.conv5 = L.Convolution2D(
in_channels=n_fg_class * 3 + 1024,
out_channels=1024,
ksize=3,
pad=1,
initialW=mask_initialW,
)
elif '_res' in self.mask_loss:
self.conv5 = L.Convolution2D(
in_channels=3,
out_channels=1024,
ksize=3,
pad=1,
initialW=mask_initialW,
)
else:
self.conv5 = L.Convolution2D(
in_channels=3 + 1024,
out_channels=1024,
ksize=3,
pad=1,
initialW=mask_initialW,
)
self.mask2 = L.Convolution2D(
in_channels=256,
out_channels=3,
ksize=1,
initialW=mask_initialW,
)
else:
                raise ValueError('unsupported mask_loss: {}'.format(self.mask_loss))
self.n_class = n_class
self.roi_size = roi_size
self.spatial_scale = spatial_scale
self.pooling_func = pooling_func
_convert_bn_to_affine(self)
self._copy_imagenet_pretrained_resnet(n_layers)
def _copy_imagenet_pretrained_resnet(self, n_layers):
if n_layers == 50:
pretrained_model = ResNet50Extractor(pretrained_model='auto')
elif n_layers == 101:
pretrained_model = ResNet101Extractor(pretrained_model='auto')
else:
            raise ValueError('unsupported n_layers: {}'.format(n_layers))
self.res5.copyparams(pretrained_model.res5)
_copy_persistent_chain(self.res5, pretrained_model.res5)
def __call__(self, x, rois, roi_indices, pred_bbox=True, pred_mask=True,
pred_bbox2=False, pred_mask2=True, labels=None):
roi_indices = roi_indices.astype(np.float32)
indices_and_rois = self.xp.concatenate(
(roi_indices[:, None], rois), axis=1)
pool = _roi_pooling_2d_yx(
x, indices_and_rois, self.roi_size, self.roi_size,
self.spatial_scale, self.pooling_func)
with chainer.using_config('train', False):
res5 = self.res5(pool)
roi_cls_locs = None
roi_scores = None
roi_masks = None
if pred_bbox:
pool5 = F.average_pooling_2d(res5, 7, stride=7)
roi_cls_locs = self.cls_loc(pool5)
roi_scores = self.score(pool5)
if pred_mask:
deconv6 = F.relu(self.deconv6(res5))
if self.mask_loss in ['softmax', 'softmax_x2']:
roi_masks = self.mask(deconv6)
elif self.mask_loss == 'sigmoid_softmax':
roi_masks = self.mask(deconv6)
roi_masks_bginv = self.mask_bginv(deconv6)
roi_masks = (roi_masks, roi_masks_bginv)
elif self.mask_loss == 'sigmoid_sigmoid':
roi_masks = self.mask(deconv6)
roi_masks_visinv = self.mask_visinv(deconv6)
roi_masks = (roi_masks, roi_masks_visinv)
elif self.mask_loss == 'sigmoid_sigmoid+':
roi_masks = self.mask(deconv6)
roi_masks_visinv = self.mask_visinv(deconv6)
roi_masks = (roi_masks, roi_masks + roi_masks_visinv)
elif self.mask_loss in [
'softmax_relook_softmax',
'softmax_relook_softmax+',
'softmax_relook_softmax+_res',
'softmax_relook_softmax_cls',
'softmax_relook_softmax+_cls',
'softmax_relook_softmax_tt',
'softmax_relook_softmax+_tt',
'softmax_relook_softmax+_tt2',
'softmax_relook_softmax_cls_tt',
'softmax_relook_softmax+_cls_tt',
'softmax_relook_softmax_bbox',
'softmax_relook_softmax+_bbox',
]:
assert labels is not None
# roi_masks: (n_roi, n_fg_class, 14, 14) -> (n_roi, 14, 14)
# print('deconv6', deconv6.shape)
roi_masks = self.mask(deconv6)
# print('roi_masks', roi_masks.shape)
n_roi = rois.shape[0]
# print('labels', labels.shape)
n_positive = int((labels > 0).sum())
# print('n_positive', n_positive)
labels = labels[:n_positive]
rois = rois[:n_positive]
# indices_and_rois = indices_and_rois[:n_positive]
# print('labels', labels.shape)
# print('rois', rois.shape)
roi_masks = F.reshape(
roi_masks,
(n_roi, -1, 3, roi_masks.shape[2], roi_masks.shape[3]))
# print('roi_masks', roi_masks.shape)
roi_masks = roi_masks[np.arange(n_positive), labels - 1]
assert (labels == 0).sum() == 0
# print('roi_masks', roi_masks.shape)
if '_cls' in self.mask_loss:
whole_masks = roi_mask_to_whole_mask(
F.softmax(roi_masks).array,
rois, x.shape[2:4], self.spatial_scale,
fg_labels=labels - 1, n_fg_class=self.n_class - 1)
else:
whole_masks = roi_mask_to_whole_mask(
F.softmax(roi_masks).array,
rois, x.shape[2:4], self.spatial_scale)
# print('whole_masks', whole_masks.shape)
whole_masks = F.reshape(
whole_masks,
(1, -1, whole_masks.shape[2], whole_masks.shape[3]))
# print('whole_masks', whole_masks.shape)
if '_res' in self.mask_loss:
h = self.conv5(whole_masks)
h = F.relu(h + x)
else:
h = F.concat([whole_masks, x], axis=1)
# print('h', h.shape)
h = F.relu(self.conv5(h)) # 1/16, whole
# print('h', h.shape)
h = _roi_pooling_2d_yx(
h, indices_and_rois,
self.roi_size, self.roi_size,
self.spatial_scale, self.pooling_func)
# print('h', h.shape) # 1/16, roi
with chainer.using_config('train', False):
res5 = self.res5(h)
# print('h', h.shape) # 1/16, roi
if pred_bbox2:
pool5 = F.average_pooling_2d(res5, 7, stride=7)
roi_cls_locs2 = self.cls_loc(pool5)
roi_scores2 = self.score(pool5)
roi_cls_locs = (roi_cls_locs, roi_cls_locs2)
roi_scores = (roi_scores, roi_scores2)
roi_masks2 = None
if pred_mask2:
h = F.relu(self.deconv6(res5))
h = h[:n_positive, :, :, :]
# print('h', h.shape) # 1/8, roi
roi_masks2 = self.mask2(h) # 1/8, roi
# print('roi_masks2', roi_masks2.shape)
roi_masks = (roi_masks, roi_masks2)
else:
                raise ValueError('unsupported mask_loss: {}'.format(self.mask_loss))
return roi_cls_locs, roi_scores, roi_masks
def roi_mask_to_whole_mask(roi_masks, rois, img_shape, spatial_scale,
fg_labels=None, n_fg_class=None):
class_specific = False
if fg_labels is not None or n_fg_class is not None:
assert fg_labels is not None
assert n_fg_class is not None
class_specific = True
xp = chainer.cuda.get_array_module(roi_masks)
rois = (rois * spatial_scale).astype(xp.int32)
rois[:, 0::2] = xp.clip(rois[:, 0::2], 0, img_shape[0])
rois[:, 1::2] = xp.clip(rois[:, 1::2], 0, img_shape[1])
n_roi = roi_masks.shape[0]
assert rois.shape[0] == n_roi
if class_specific:
masks = xp.zeros(
(1, n_fg_class, roi_masks.shape[1], img_shape[0], img_shape[1]),
dtype=xp.float32)
else:
masks = xp.zeros(
(1, roi_masks.shape[1], img_shape[0], img_shape[1]),
dtype=xp.float32)
for i in range(n_roi):
roi_mask = roi_masks[i]
y1, x1, y2, x2 = rois[i]
roi_H = int(y2 - y1)
roi_W = int(x2 - x1)
y1, x1, y2, x2 = map(int, [y1, x1, y2, x2])
roi_mask = F.resize_images(
roi_mask[None, :, :, :], (roi_H, roi_W)).array[0, :, :, :]
if class_specific:
fg_label = fg_labels[i]
masks[0, fg_label, :, y1:y2, x1:x2] += roi_mask
else:
masks[0, :, y1:y2, x1:x2] += roi_mask
masks = masks.reshape(1, -1, img_shape[0], img_shape[1])
return masks
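# Hedged illustration (assumption, not from the original module): pasting two
# 14x14 ROI probability maps back onto the 1/16-scale grid of a 320x320 input
# with roi_mask_to_whole_mask().
def _example_roi_mask_paste():  # pragma: no cover
    roi_masks = np.random.uniform(size=(2, 3, 14, 14)).astype(np.float32)
    # rois are (y_min, x_min, y_max, x_max) in input-image coordinates
    rois = np.array([[0, 0, 160, 160], [80, 80, 320, 320]], dtype=np.float32)
    masks = roi_mask_to_whole_mask(roi_masks, rois, (20, 20), 1. / 16)
    return masks.shape  # (1, 3, 20, 20)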
def _roi_pooling_2d_yx(x, indices_and_rois, outh, outw, spatial_scale,
pooling_func):
xy_indices_and_rois = indices_and_rois[:, [0, 2, 1, 4, 3]]
pool = pooling_func(x, xy_indices_and_rois, outh, outw, spatial_scale)
return pool
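# Hedged illustration (not from the original module): _roi_pooling_2d_yx()
# reorders each (index, y_min, x_min, y_max, x_max) row into the
# (index, x_min, y_min, x_max, y_max) layout the pooling function expects.
def _example_yx_to_xy_row():  # pragma: no cover
    row = np.array([[0., 10., 20., 110., 220.]], dtype=np.float32)
    return row[:, [0, 2, 1, 4, 3]]  # -> [[0., 20., 10., 220., 110.]]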
|