gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# -*- coding: utf-8 -*-
# pylint: disable=W0231, W0142
"""Tests for statistical power calculations
Note:
tests for chisquare power are in test_gof.py
Created on Sat Mar 09 08:44:49 2013
Author: Josef Perktold
"""
import copy
import warnings
import nose
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose, assert_raises,
assert_equal, assert_warns, dec)
import scipy
import statsmodels.stats.power as smp
from statsmodels.stats.tests.test_weightstats import Holder
# matplotlib is an optional dependency; plot tests are skipped when absent
try:
    import matplotlib.pyplot as plt  # makes plt available for test functions
    have_matplotlib = True
except ImportError:
    have_matplotlib = False

# True when installed scipy is at least 0.10; solver convergence behavior
# changed around that release (see test_power_solver_warn)
SM_GT_10 = LooseVersion(scipy.__version__) >= '0.10'
class CheckPowerMixin(object):
    """Shared checks for the power-calculation classes.

    Subclasses must define: ``res2`` (expected results holder), ``kwds``
    (keywords including 'power', each used as a root-finding target),
    ``kwds_extra`` (fixed keywords, never solved for) and ``cls`` (the
    power class under test). ``decimal`` optionally overrides precision.
    """

    def test_power(self):
        # compare power() against results from R's pwr package
        call_kwds = copy.copy(self.kwds)
        del call_kwds['power']
        call_kwds.update(self.kwds_extra)
        decimal = getattr(self, 'decimal', 6)
        power_obj = self.cls()
        assert_almost_equal(power_obj.power(**call_kwds), self.res2.power,
                            decimal=decimal)

    def test_positional(self):
        # same check, but passing the leading arguments positionally
        power_obj = self.cls()
        call_kwds = copy.copy(self.kwds)
        del call_kwds['power']
        call_kwds.update(self.kwds_extra)
        # which arguments are positional: explicit list or the default trio
        nobs_name = 'nobs' if 'nobs' in call_kwds else 'nobs1'
        args_names = getattr(self, 'args_names',
                             ['effect_size', nobs_name, 'alpha'])
        positional = [call_kwds.pop(name) for name in args_names]
        decimal = getattr(self, 'decimal', 6)
        res = power_obj.power(*positional, **call_kwds)
        assert_almost_equal(res, self.res2.power, decimal=decimal)

    def test_roots(self):
        # solve_power should recover each keyword from the others
        call_kwds = copy.copy(self.kwds)
        call_kwds.update(self.kwds_extra)
        # kwds_extra are used as arguments, but never as root-finding targets
        for key in self.kwds:
            target = call_kwds[key]
            call_kwds[key] = None
            result = self.cls().solve_power(**call_kwds)
            assert_allclose(result, target, rtol=0.001,
                            err_msg=key + ' failed')
            call_kwds[key] = target  # restore for the next iteration

    @dec.skipif(not have_matplotlib)
    def test_power_plot(self):
        # smoke test for plot_power; FTestPower does not support it
        if self.cls == smp.FTestPower:
            raise nose.SkipTest('skip FTestPower plot_power')
        plt.close()
        fig = plt.figure()
        axis = fig.add_subplot(2, 1, 1)
        fig = self.cls().plot_power(dep_var='nobs',
                                    nobs=np.arange(2, 100),
                                    effect_size=np.array([0.1, 0.2, 0.3,
                                                          0.5, 1]),
                                    ax=axis, title='Power of t-Test',
                                    **self.kwds_extra)
        axis = fig.add_subplot(2, 1, 2)
        fig = self.cls().plot_power(dep_var='es',
                                    nobs=np.array([10, 20, 30, 50, 70, 100]),
                                    effect_size=np.linspace(0.01, 2, 51),
                                    ax=axis, title='',
                                    **self.kwds_extra)
        plt.close(fig)
#''' test cases
#one sample
# two-sided one-sided
#large power OneS1 OneS3
#small power OneS2 OneS4
#
#two sample
# two-sided one-sided
#large power TwoS1 TwoS3
#small power TwoS2 TwoS4
#small p, ratio TwoS4 TwoS5
#'''
class TestTTPowerOneS1(CheckPowerMixin):
    def __init__(self):
        # R reference (note: the R call below says type="two.sample" but the
        # stored method string is one-sample -- presumably a copy-paste slip
        # in the comment; verify against the pwr session if it matters):
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
        #> cat_items(p, prefix='tt_power2_1.')
        r = Holder()
        r.n = 30
        r.d = 1
        r.sig_level = 0.05
        r.power = 0.9995636009612725
        r.alternative = 'two.sided'
        r.note = 'NULL'
        r.method = 'One-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs=r.n,
                         alpha=r.sig_level, power=r.power)
        self.kwds_extra = {}
        self.cls = smp.TTestPower
class TestTTPowerOneS2(CheckPowerMixin):
    # case with small power
    def __init__(self):
        #> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="two.sided")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n = 20
        r.d = 0.2
        r.sig_level = 0.05
        r.power = 0.1359562887679666
        r.alternative = 'two.sided'
        r.note = 'NULL'
        r.method = 'One-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs=r.n,
                         alpha=r.sig_level, power=r.power)
        self.kwds_extra = {}
        self.cls = smp.TTestPower
class TestTTPowerOneS3(CheckPowerMixin):
    # one-sided alternative (R "greater" == statsmodels "larger")
    def __init__(self):
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="one.sample",alternative="greater")
        #> cat_items(p, prefix='tt_power1_1g.')
        r = Holder()
        r.n = 30
        r.d = 1
        r.sig_level = 0.05
        r.power = 0.999892010204909
        r.alternative = 'greater'
        r.note = 'NULL'
        r.method = 'One-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs=r.n,
                         alpha=r.sig_level, power=r.power)
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestPower
class TestTTPowerOneS4(CheckPowerMixin):
    # one-sided, small effect size -> small power
    def __init__(self):
        #> p = pwr.t.test(d=0.05,n=20,sig.level=0.05,type="one.sample",alternative="greater")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n = 20
        r.d = 0.05
        r.sig_level = 0.05
        r.power = 0.0764888785042198
        r.alternative = 'greater'
        r.note = 'NULL'
        r.method = 'One-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs=r.n,
                         alpha=r.sig_level, power=r.power)
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestPower
class TestTTPowerOneS5(CheckPowerMixin):
    # case one-sided less, not implemented yet
    def __init__(self):
        #> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n = 20
        r.d = 0.2
        r.sig_level = 0.05
        r.power = 0.006063932667926375
        r.alternative = 'less'
        r.note = 'NULL'
        r.method = 'One-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs=r.n,
                         alpha=r.sig_level, power=r.power)
        self.kwds_extra = {'alternative': 'smaller'}
        self.cls = smp.TTestPower
class TestTTPowerOneS6(CheckPowerMixin):
    # case one-sided less, negative effect size, not implemented yet
    def __init__(self):
        #> p = pwr.t.test(d=-0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n = 20
        r.d = -0.2
        r.sig_level = 0.05
        r.power = 0.21707518167191
        r.alternative = 'less'
        r.note = 'NULL'
        r.method = 'One-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs=r.n,
                         alpha=r.sig_level, power=r.power)
        self.kwds_extra = {'alternative': 'smaller'}
        self.cls = smp.TTestPower
class TestTTPowerTwoS1(CheckPowerMixin):
    def __init__(self):
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
        #> cat_items(p, prefix='tt_power2_1.')
        r = Holder()
        r.n = 30
        r.d = 1
        r.sig_level = 0.05
        r.power = 0.967708258242517
        r.alternative = 'two.sided'
        r.note = 'n is number in *each* group'
        r.method = 'Two-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs1=r.n,
                         alpha=r.sig_level, power=r.power, ratio=1)
        self.kwds_extra = {}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS2(CheckPowerMixin):
    # small effect size -> small power
    def __init__(self):
        #> p = pwr.t.test(d=0.1,n=20,sig.level=0.05,type="two.sample",alternative="two.sided")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n = 20
        r.d = 0.1
        r.sig_level = 0.05
        r.power = 0.06095912465411235
        r.alternative = 'two.sided'
        r.note = 'n is number in *each* group'
        r.method = 'Two-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs1=r.n,
                         alpha=r.sig_level, power=r.power, ratio=1)
        self.kwds_extra = {}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS3(CheckPowerMixin):
    # one-sided alternative
    def __init__(self):
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="greater")
        #> cat_items(p, prefix='tt_power2_1g.')
        r = Holder()
        r.n = 30
        r.d = 1
        r.sig_level = 0.05
        r.power = 0.985459690251624
        r.alternative = 'greater'
        r.note = 'n is number in *each* group'
        r.method = 'Two-sample t test power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs1=r.n,
                         alpha=r.sig_level, power=r.power, ratio=1)
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS4(CheckPowerMixin):
    # case with small power
    def __init__(self):
        #> p = pwr.t.test(d=0.01,n=30,sig.level=0.05,type="two.sample",alternative="greater")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n = 30
        r.d = 0.01
        r.sig_level = 0.05
        r.power = 0.0540740302835667
        r.alternative = 'greater'
        r.note = 'n is number in *each* group'
        r.method = 'Two-sample t test power calculation'
        self.res2 = r
        # no ratio here: relies on the class default (equal group sizes)
        self.kwds = dict(effect_size=r.d, nobs1=r.n,
                         alpha=r.sig_level, power=r.power)
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS5(CheckPowerMixin):
    # case with unequal n, ratio>1
    def __init__(self):
        #> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="two.sided")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n1 = 20
        r.n2 = 30
        r.d = 0.1
        r.sig_level = 0.05
        r.power = 0.0633081832564667
        r.alternative = 'two.sided'
        r.method = 't test power calculation'
        self.res2 = r
        # ratio = n2 / n1 = 30 / 20 = 1.5
        self.kwds = dict(effect_size=r.d, nobs1=r.n1,
                         alpha=r.sig_level, power=r.power, ratio=1.5)
        self.kwds_extra = {'alternative': 'two-sided'}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS6(CheckPowerMixin):
    # case with unequal n, ratio>1
    def __init__(self):
        #> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="greater")
        #> cat_items(p, "res2.")
        r = Holder()
        r.n1 = 20
        r.n2 = 30
        r.d = 0.1
        r.sig_level = 0.05
        r.power = 0.09623589080917805
        r.alternative = 'greater'
        r.method = 't test power calculation'
        self.res2 = r
        # ratio = n2 / n1 = 30 / 20 = 1.5
        self.kwds = dict(effect_size=r.d, nobs1=r.n1,
                         alpha=r.sig_level, power=r.power, ratio=1.5)
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestIndPower
def test_normal_power_explicit():
    """A few initial test cases for NormalIndPower against R::pwr values."""
    d = 0.3
    nobs = 80
    alpha = 0.05

    # same answer through three entry points: the helper function, power(),
    # and solve_power() with power=None
    res_R = 0.475100870572638
    assert_almost_equal(smp.normal_power(d, nobs / 2., alpha), res_R,
                        decimal=13)
    assert_almost_equal(smp.NormalIndPower().power(d, nobs, alpha), res_R,
                        decimal=13)
    assert_almost_equal(
        smp.NormalIndPower().solve_power(effect_size=0.3, nobs1=80,
                                         alpha=0.05, power=None),
        res_R, decimal=13)

    # value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
    norm_pow = smp.normal_power(-0.01, nobs / 2., 0.05)
    assert_almost_equal(norm_pow, 0.05045832927039234, decimal=11)

    # value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
    norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05,
                                          alternative="larger")
    assert_almost_equal(norm_pow, 0.056869534873146124, decimal=11)

    # Note: negative effect size is same as switching one-sided alternative
    # TODO: should I switch to larger/smaller instead of "one-sided" options
    # value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
    norm_pow = smp.NormalIndPower().power(-0.01, nobs, 0.05,
                                          alternative="larger")
    assert_almost_equal(norm_pow, 0.0438089705093578, decimal=11)
class TestNormalIndPower1(CheckPowerMixin):
    def __init__(self):
        #> example from above
        # results copied not directly from R
        r = Holder()
        r.n = 80
        r.d = 0.3
        r.sig_level = 0.05
        r.power = 0.475100870572638
        r.alternative = 'two.sided'
        r.note = 'NULL'
        r.method = 'two sample power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs1=r.n,
                         alpha=r.sig_level, power=r.power, ratio=1)
        self.kwds_extra = {}
        self.cls = smp.NormalIndPower
class TestNormalIndPower2(CheckPowerMixin):
    # one-sided "less" (statsmodels: "smaller")
    def __init__(self):
        #> np = pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
        #> cat_items(np, "res2.")
        r = Holder()
        r.h = 0.01
        r.n = 80
        r.sig_level = 0.05
        r.power = 0.0438089705093578
        r.alternative = 'less'
        r.method = ('Difference of proportion power calculation for' +
                    ' binomial distribution (arcsine transformation)')
        r.note = 'same sample sizes'
        self.res2 = r
        self.kwds = dict(effect_size=r.h, nobs1=r.n,
                         alpha=r.sig_level, power=r.power, ratio=1)
        self.kwds_extra = {'alternative': 'smaller'}
        self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp1(CheckPowerMixin):
    def __init__(self):
        # forcing one-sample by using ratio=0
        #> example from above
        # results copied not directly from R
        r = Holder()
        r.n = 40
        r.d = 0.3
        r.sig_level = 0.05
        r.power = 0.475100870572638
        r.alternative = 'two.sided'
        r.note = 'NULL'
        r.method = 'two sample power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs1=r.n,
                         alpha=r.sig_level, power=r.power)
        # keyword for which we don't look for root:
        self.kwds_extra = {'ratio': 0}
        self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp2(CheckPowerMixin):
    # Note: same power as two sample case with twice as many observations
    def __init__(self):
        # forcing one-sample by using ratio=0
        #> np = pwr.norm.test(d=0.01,n=40,sig.level=0.05,alternative="less")
        #> cat_items(np, "res2.")
        r = Holder()
        r.d = 0.01
        r.n = 40
        r.sig_level = 0.05
        r.power = 0.0438089705093578
        r.alternative = 'less'
        r.method = 'Mean power calculation for normal distribution with known variance'
        self.res2 = r
        self.kwds = dict(effect_size=r.d, nobs1=r.n,
                         alpha=r.sig_level, power=r.power)
        # keywords for which we don't look for roots:
        self.kwds_extra = {'ratio': 0, 'alternative': 'smaller'}
        self.cls = smp.NormalIndPower
class TestChisquarePower(CheckPowerMixin):
    def __init__(self):
        # one example from test_gof, results_power
        r = Holder()
        r.w = 0.1
        r.N = 5
        r.df = 4
        r.sig_level = 0.05
        r.power = 0.05246644635810126
        r.method = 'Chi squared power calculation'
        r.note = 'N is the number of observations'
        self.res2 = r
        self.kwds = dict(effect_size=r.w, nobs=r.N,
                         alpha=r.sig_level, power=r.power)
        # keyword for which we don't look for root:
        # solving for n_bins doesn't work, will not be used in regular usage
        self.kwds_extra = {'n_bins': r.df + 1}
        self.cls = smp.GofChisquarePower

    def _test_positional(self):
        # disabled (leading underscore): positional variant incl. n_bins
        power_obj = self.cls()
        names = ['effect_size', 'nobs', 'alpha', 'n_bins']
        call_kwds = copy.copy(self.kwds)
        del call_kwds['power']
        call_kwds.update(self.kwds_extra)
        positional = [call_kwds[name] for name in names]
        decimal = getattr(self, 'decimal', 6)
        assert_almost_equal(power_obj.power(*positional), self.res2.power,
                            decimal=decimal)
def test_ftest_power():
    """Check ftest_power against ttest_power, G*Power and R::pwr results."""
    # equivalence of F-test (1 numerator df) with the two-sided t-test
    for alpha in [0.01, 0.05, 0.1, 0.20, 0.50]:
        res0 = smp.ttest_power(0.01, 200, alpha)
        res1 = smp.ftest_power(0.01, 199, 1, alpha=alpha, ncc=0)
        assert_almost_equal(res1, res0, decimal=6)

    # example from G*Power documentation, F-test ANOVA
    # Total sample size: 200
    # Effect size "f":  0.25
    # Beta/alpha ratio: 1
    # Result: Alpha: 0.1592, Power (1-beta): 0.8408,
    #         Critical F: 1.4762, Lambda: 12.50000
    res1 = smp.ftest_anova_power(0.25, 200, 0.1592, k_groups=10)
    res0 = 0.8408
    assert_almost_equal(res1, res0, decimal=4)

    # TODO: no class yet
    # examples against R::pwr, each generated with e.g.
    #> rf = pwr.f2.test(u=5, v=199, f2=0.1**2, sig.level=0.01)
    #> cat_items(rf, "res2.")
    # method reported by R: 'Multiple regression power calculation'
    # cases are tuples (u, v, f2, sig_level, expected_power)
    r_cases = [
        (5, 199, 0.01, 0.01, 0.0494137732920332),
        (5, 199, 0.09, 0.01, 0.7967191006290872),
        (5, 19, 0.09, 0.1, 0.235454222377575),
    ]
    for u, v, f2, sig_level, power in r_cases:
        # effect size is sqrt(f2); note the argument order passes R's v
        # (denominator df) as the second argument, matching the existing
        # statsmodels ftest_power convention used above
        res1 = smp.ftest_power(np.sqrt(f2), v, u,
                               alpha=sig_level, ncc=1)
        assert_almost_equal(res1, power, decimal=5)
# class based version of two above test for Ftest
class TestFtestAnovaPower(CheckPowerMixin):
    def __init__(self):
        # example from G*Power documentation, F-test ANOVA
        # Total sample size: 200
        # Effect size "f":  0.25
        # Beta/alpha ratio: 1
        # Result: Alpha: 0.1592, Power (1-beta): 0.8408,
        #         Critical F: 1.4762, Lambda: 12.50000
        # converted to res2 by hand
        r = Holder()
        r.f = 0.25
        r.n = 200
        r.k = 10
        r.alpha = 0.1592
        r.power = 0.8408
        r.method = 'Multiple regression power calculation'
        self.res2 = r
        self.kwds = dict(effect_size=r.f, nobs=r.n,
                         alpha=r.alpha, power=r.power)
        # keyword for which we don't look for root:
        self.kwds_extra = {'k_groups': r.k}  # rootfinding doesn't work
        #self.args_names = ['effect_size','nobs', 'alpha']#, 'k_groups']
        self.cls = smp.FTestAnovaPower
        # precision for test_power (G*Power values only have 4 decimals)
        self.decimal = 4
class TestFtestPower(CheckPowerMixin):
    def __init__(self):
        #> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
        #> cat_items(rf, "res2.")
        r = Holder()
        r.u = 5
        r.v = 19
        r.f2 = 0.09
        r.sig_level = 0.1
        r.power = 0.235454222377575
        r.method = 'Multiple regression power calculation'
        self.res2 = r
        # NOTE(review): df_num receives R's v and df_denom R's u, which looks
        # swapped relative to the usual numerator/denominator naming; it
        # mirrors the argument order used in test_ftest_power -- confirm
        # against smp.FTestPower before "fixing".
        self.kwds = dict(effect_size=np.sqrt(r.f2), df_num=r.v,
                         df_denom=r.u, alpha=r.sig_level,
                         power=r.power)
        self.kwds_extra = {}
        self.args_names = ['effect_size', 'df_num', 'df_denom', 'alpha']
        self.cls = smp.FTestPower
        # precision for test_power
        self.decimal = 5
def test_power_solver():
    """Exercise solve_power's chain of backup root-finders.

    Each stage deliberately corrupts the solver's starting values so the
    next fallback is triggered; cache_fit_res records the attempts.
    """
    # messing up the solver to trigger backup
    nip = smp.NormalIndPower()

    # check result
    es0 = 0.1
    pow_ = nip.solve_power(es0, nobs1=1600, alpha=0.01, power=None, ratio=1,
                           alternative='larger')
    # value is regression test
    assert_almost_equal(pow_, 0.69219411243824214, decimal=5)

    # round trip: solving for effect_size should recover es0
    es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    # cache_fit_res[0] appears to flag convergence (1 = success) and the
    # remaining entries record solver attempts -- see solve_power internals
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 2)

    # cause first optimizer to fail (bracket entirely below zero)
    nip.start_bqexp['effect_size'] = {'upp': -10, 'low': -20}
    nip.start_ttp['effect_size'] = 0.14
    es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    # one extra attempt recorded compared to the clean run above
    assert_equal(len(nip.cache_fit_res), 3, err_msg=repr(nip.cache_fit_res))

    # corrupt the second starting value as well -> another fallback
    nip.start_ttp['effect_size'] = np.nan
    es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 4)

    # I let this case fail, could be fixed for some statistical tests
    # (we shouldn't get here in the first place)
    # effect size is negative, but last stage brentq uses [1e-8, 1-1e-8]
    assert_raises(ValueError, nip.solve_power, None, nobs1=1600, alpha=0.01,
                  power=0.005, ratio=1, alternative='larger')
@dec.skipif(SM_GT_10, 'Known failure on modern SciPy')
def test_power_solver_warn():
    """Check that a convergence failure in solve_power raises a warning."""
    # messing up the solver to trigger warning
    # I wrote this with scipy 0.9,
    # convergence behavior of scipy 0.11 is different,
    # fails at a different case, but is successful where it failed before
    pow_ = 0.69219411243824214  # from previous function

    nip = smp.NormalIndPower()
    # using nobs, has one backup (fsolve)
    nip.start_bqexp['nobs1'] = {'upp': 50, 'low': -20}
    val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
                          alternative='larger')
    assert_almost_equal(val, 1600, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 3)

    # case that has convergence failure, and should warn
    nip.start_ttp['nobs1'] = np.nan
    from statsmodels.tools.sm_exceptions import ConvergenceWarning
    assert_warns(ConvergenceWarning, nip.solve_power, 0.1, nobs1=None,
                 alpha=0.01, power=pow_, ratio=1, alternative='larger')
    # this converges with scipy 0.11 ???
    # nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1, alternative='larger')

    # rerun with the warning silenced; the failure flag (0) is still recorded
    with warnings.catch_warnings():  # python >= 2.6
        warnings.simplefilter("ignore")
        val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
                              alternative='larger')
    assert_equal(nip.cache_fit_res[0], 0)
    assert_equal(len(nip.cache_fit_res), 3)
if __name__ == '__main__':
    # ad-hoc smoke run when executed directly (same cases, same order)
    test_normal_power_explicit()
    for case_cls in (TestNormalIndPower1, TestNormalIndPower_onesamp1):
        case = case_cls()
        case.test_power()
        case.test_roots()
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Boot session from cache or build
Session bootstraps info needed by common client side activities including
permission, homepage, default variables, system defaults etc
"""
import frappe, json
from frappe import _
import frappe.utils
from frappe.utils import cint
import frappe.model.meta
import frappe.defaults
import frappe.translate
@frappe.whitelist()
def clear(user=None):
    """Whitelisted endpoint: persist the current session, then clear caches."""
    # flush the in-memory session to the DB before clearing anything
    frappe.local.session_obj.update(force=True)
    frappe.local.db.commit()
    clear_cache(frappe.session.user)
    frappe.response['message'] = "Cache Cleared"
def clear_cache(user=None):
    """Clear server-side caches; if *user* is given, only that user's entries."""
    cache = frappe.cache()

    frappe.model.meta.clear_cache()
    # global (user-independent) cache keys
    cache.delete_value(["app_hooks", "installed_apps", "app_modules",
        "module_apps", "home_page", "time_zone"])

    def delete_user_cache(user):
        # per-user cache entries are namespaced as "<key>:<user>"
        for key in ("bootinfo", "lang", "roles", "restrictions", "home_page"):
            cache.delete_value(key + ":" + user)

    def clear_notifications(user=None):
        # skipped while installing frappe itself (tables may not exist yet)
        if frappe.flags.in_install_app!="frappe":
            if user:
                frappe.db.sql("""delete from `tabNotification Count` where owner=%s""", (user,))
            else:
                frappe.db.sql("""delete from `tabNotification Count`""")

    if user:
        delete_user_cache(user)
        clear_notifications(user)
        if frappe.session:
            if user==frappe.session.user and frappe.session.sid:
                # clearing own cache: only the current session entry
                cache.delete_value("session:" + frappe.session.sid)
            else:
                # clearing another user's cache: all of their sessions
                for sid in frappe.db.sql_list("""select sid from tabSessions
                    where user=%s""", (user,)):
                    cache.delete_value("session:" + sid)
        frappe.defaults.clear_cache(user)
    else:
        # no user given: wipe session caches for everybody
        for sess in frappe.db.sql("""select user, sid from tabSessions""", as_dict=1):
            delete_user_cache(sess.user)
            cache.delete_value("session:" + sess.sid)
        delete_user_cache("Guest")
        clear_notifications()
        frappe.defaults.clear_cache()
def clear_sessions(user=None, keep_current=False):
    """Delete all stored sessions for *user* (default: the current user).

    With keep_current=True the active session is left untouched.
    """
    if not user:
        user = frappe.session.user
    for row in frappe.db.sql("""select sid from tabSessions where user=%s""", (user,)):
        sid = row[0]
        # skip the active session when asked to keep it
        if not (keep_current and frappe.session.sid == sid):
            delete_session(sid)
def delete_session(sid=None):
    """Remove one session: first from the cache, then from the database."""
    frappe.cache().delete_value("session:" + sid)
    frappe.db.sql("""delete from tabSessions where sid=%s""", sid)
def clear_all_sessions():
    """This effectively logs out all users"""
    # Administrator-only operation
    frappe.only_for("Administrator")
    for session_id in frappe.db.sql_list("select sid from `tabSessions`"):
        delete_session(session_id)
def clear_expired_sessions():
    """This function is meant to be called from scheduler"""
    # sessions idle for longer than the configured expiry period are dropped
    expired = frappe.db.sql_list("""select sid
        from tabSessions where TIMEDIFF(NOW(), lastupdate) > TIME(%s)""",
        get_expiry_period())
    for session_id in expired:
        delete_session(session_id)
def get():
    """get session boot info

    Returns the bootinfo dict for the current user, served from the
    cache when possible and rebuilt (and re-cached) otherwise.
    """
    from frappe.core.doctype.notification_count.notification_count import \
        get_notification_info_for_boot, get_notifications
    from frappe.boot import get_bootinfo, get_startup_js

    bootinfo = None
    if not getattr(frappe.conf,'disable_session_cache', None):
        # check if cache exists
        bootinfo = frappe.cache().get_value('bootinfo:' + frappe.session.user)
        if bootinfo:
            bootinfo['from_cache'] = 1
            # recent items and notifications are refreshed even on cache hit
            bootinfo["user"]["recent"] = json.dumps(frappe.cache().get_value("recent:" + frappe.session.user))
            bootinfo["notification_info"].update(get_notifications())

    if not bootinfo:
        # no cache stats usually means memcached itself is down
        if not frappe.cache().get_stats():
            frappe.msgprint(_("memcached is not working / stopped. Please start memcached for best results."))

        # if not create it
        bootinfo = get_bootinfo()
        bootinfo["notification_info"] = get_notification_info_for_boot()
        frappe.cache().set_value('bootinfo:' + frappe.session.user, bootinfo)

    # metadata_version lets the client detect stale cached metadata
    bootinfo["metadata_version"] = frappe.cache().get_value("metadata_version")
    if not bootinfo["metadata_version"]:
        bootinfo["metadata_version"] = frappe.reset_metadata_version()

    bootinfo["startup_js"] = get_startup_js()

    # let installed apps extend the boot payload
    for hook in frappe.get_hooks("extend_bootinfo"):
        frappe.get_attr(hook)(bootinfo=bootinfo)

    return bootinfo
class Session:
    """Server-side session, held in memcache and persisted to tabSessions.

    ``self.data`` has the shape {'user': ..., 'sid': ..., 'data': {...}}
    where the inner 'data' dict carries the actual session payload.
    """

    def __init__(self, user, resume=False):
        # sid from the request: form value wins over cookie; guests share 'Guest'
        self.sid = frappe.form_dict.get('sid') or frappe.request.cookies.get('sid', 'Guest')
        self.user = user
        self.data = frappe._dict({'data': frappe._dict({})})
        self.time_diff = None
        if resume:
            self.resume()
        else:
            self.start()
        # set local session
        frappe.local.session = self.data

    def start(self):
        """start a new session"""
        # generate sid
        if self.user=='Guest':
            sid = 'Guest'
        else:
            sid = frappe.generate_hash()

        self.data['user'] = self.user
        self.data['sid'] = sid
        self.data['data']['user'] = self.user
        self.data['data']['session_ip'] = frappe.get_request_header('REMOTE_ADDR')
        if self.user != "Guest":
            self.data['data']['last_updated'] = frappe.utils.now()
            self.data['data']['session_expiry'] = get_expiry_period()
            self.data['data']['session_country'] = get_geo_ip_country(frappe.get_request_header('REMOTE_ADDR'))

        # insert session -- guests are never persisted to the database
        if self.user!="Guest":
            frappe.db.begin()
            self.insert_session_record()

            # update user's last login / last ip
            frappe.db.sql("""UPDATE tabUser SET last_login = %s, last_ip = %s
                where name=%s""", (frappe.utils.now(), frappe.get_request_header('REMOTE_ADDR'), self.data['user']))
            frappe.db.commit()

    def insert_session_record(self):
        # persist to the DB and mirror into memcache
        frappe.db.sql("""insert into tabSessions
            (sessiondata, user, lastupdate, sid, status)
            values (%s , %s, NOW(), %s, 'Active')""",
            (str(self.data['data']), self.data['user'], self.data['sid']))

        # also add to memcache
        frappe.cache().set_value("session:" + self.data.sid, self.data)

    def resume(self):
        """non-login request: load a session"""
        import frappe
        data = self.get_session_record()
        if data:
            # set language
            self.data = frappe._dict({'data': data, 'user':data.user, 'sid': self.sid})
        else:
            self.start_as_guest()
        frappe.local.lang = frappe.translate.get_user_lang(self.data.user)

    def get_session_record(self):
        """get session record, or return the standard Guest Record"""
        from frappe.auth import clear_cookies
        r = self.get_session_data()
        if not r:
            # expired/unknown sid: tell the client and fall back to Guest
            frappe.response["session_expired"] = 1
            clear_cookies()
            self.sid = "Guest"
            r = self.get_session_data()
        return r

    def get_session_data(self):
        # Guest sessions carry no payload
        if self.sid=="Guest":
            return frappe._dict({"user":"Guest"})
        # cache first, DB as fallback
        data = self.get_session_data_from_cache()
        if not data:
            data = self.get_session_data_from_db()
        return data

    def get_session_data_from_cache(self):
        data = frappe._dict(frappe.cache().get_value("session:" + self.sid) or {})
        if data:
            # expire the session if it has been idle too long
            session_data = data.get("data", {})
            self.time_diff = frappe.utils.time_diff_in_seconds(frappe.utils.now(),
                session_data.get("last_updated"))
            expiry = self.get_expiry_in_seconds(session_data.get("session_expiry"))
            if self.time_diff > expiry:
                self.delete_session()
                data = None
        return data and data.data

    def get_session_data_from_db(self):
        # only rows younger than the expiry period count as live sessions
        rec = frappe.db.sql("""select user, sessiondata
            from tabSessions where sid=%s and
            TIMEDIFF(NOW(), lastupdate) < TIME(%s)""", (self.sid,
            get_expiry_period()))
        if rec:
            # NOTE(review): eval() on stored sessiondata executes arbitrary
            # code if the DB row is tampered with -- consider json or
            # ast.literal_eval instead
            data = frappe._dict(eval(rec and rec[0][1] or '{}'))
            data.user = rec[0][0]
        else:
            self.delete_session()
            data = None
        return data

    def get_expiry_in_seconds(self, expiry):
        # expiry is "HH:MM:SS"; default to one hour when unset
        if not expiry: return 3600
        parts = expiry.split(":")
        return (cint(parts[0]) * 3600) + (cint(parts[1]) * 60) + cint(parts[2])

    def delete_session(self):
        delete_session(self.sid)

    def start_as_guest(self):
        """all guests share the same 'Guest' session"""
        self.user = "Guest"
        self.start()

    def update(self, force=False):
        """extend session expiry"""
        self.data['data']['last_updated'] = frappe.utils.now()
        self.data['data']['lang'] = unicode(frappe.lang)  # Python 2 builtin

        # update session in db
        time_diff = None
        last_updated = frappe.cache().get_value("last_db_session_update:" + self.sid)
        if last_updated:
            time_diff = frappe.utils.time_diff_in_seconds(frappe.utils.now(),
                last_updated)

        if force or (frappe.session['user'] != 'Guest' and \
            ((time_diff==None) or (time_diff > 1800))):
            # database persistence is secondary, don't update it too often
            frappe.db.sql("""update tabSessions set sessiondata=%s,
                lastupdate=NOW() where sid=%s""" , (str(self.data['data']),
                self.data['sid']))
            if frappe.form_dict.cmd not in ("frappe.sessions.clear", "logout"):
                frappe.cache().set_value("last_db_session_update:" + self.sid,
                    frappe.utils.now())

        # memcache copy is always refreshed
        frappe.cache().set_value("session:" + self.sid, self.data)
def get_expiry_period():
    """Return the session expiry period as "HH:MM:SS" (default six hours)."""
    expiry = frappe.defaults.get_global_default("session_expiry") or "06:00:00"
    # pad the seconds field if the stored value is only "HH:MM"
    if len(expiry.split(':')) == 2:
        expiry = expiry + ':00'
    return expiry
def get_geo_ip_country(ip_addr):
    """Best-effort country lookup via pygeoip; returns None if unavailable."""
    try:
        import pygeoip
    except ImportError:
        # optional dependency not installed
        return
    import os
    try:
        data_file = os.path.join(os.path.dirname(frappe.__file__), "data", "GeoIP.dat")
        reader = pygeoip.GeoIP(data_file, pygeoip.MEMORY_CACHE)
        return reader.country_name_by_addr(ip_addr)
    except Exception:
        # missing/corrupt database or bad address: silently give up
        return
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'ServerDomainLkupEnum' : _MetaInfoEnum('ServerDomainLkupEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper',
{
'static-mapping':'STATIC_MAPPING',
'domain-service':'DOMAIN_SERVICE',
}, 'Cisco-IOS-XR-ip-domain-oper', _yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper']),
'HostAddressBaseIdentity' : {
'meta_info' : _MetaInfoClass('HostAddressBaseIdentity',
False,
[
],
'Cisco-IOS-XR-ip-domain-oper',
'Host-address-base',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs.Vrf.Server.ServerAddress' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs.Vrf.Server.ServerAddress',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_IDENTITY_CLASS, 'HostAddressBaseIdentity' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'HostAddressBaseIdentity',
[], [],
''' AFName
''',
'af_name',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('ipv4-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'server-address',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs.Vrf.Server' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs.Vrf.Server',
False,
[
_MetaInfoClassMember('domain', REFERENCE_LEAFLIST, 'str' , None, None,
[(0, 256)], [],
''' Domain list
''',
'domain',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('domain-lookup', REFERENCE_ENUM_CLASS, 'ServerDomainLkupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'ServerDomainLkupEnum',
[], [],
''' Domain lookup
''',
'domain_lookup',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('domain-name', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Domain name
''',
'domain_name',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('server-address', REFERENCE_LIST, 'ServerAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs.Vrf.Server.ServerAddress',
[], [],
''' Server address list
''',
'server_address',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'server',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs.Vrf.Hosts.Host.HostAliasList' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs.Vrf.Hosts.Host.HostAliasList',
False,
[
_MetaInfoClassMember('host-alias', REFERENCE_LEAFLIST, 'str' , None, None,
[(0, 256)], [],
''' Host alias list
''',
'host_alias',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'host-alias-list',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs.Vrf.Hosts.Host.HostAddress' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs.Vrf.Hosts.Host.HostAddress',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_IDENTITY_CLASS, 'HostAddressBaseIdentity' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'HostAddressBaseIdentity',
[], [],
''' AFName
''',
'af_name',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('ipv4-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'host-address',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs.Vrf.Hosts.Host' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs.Vrf.Hosts.Host',
False,
[
_MetaInfoClassMember('host-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Hostname
''',
'host_name',
'Cisco-IOS-XR-ip-domain-oper', True),
_MetaInfoClassMember('af-name', REFERENCE_IDENTITY_CLASS, 'HostAddressBaseIdentity' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'HostAddressBaseIdentity',
[], [],
''' Address type
''',
'af_name',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('age', ATTRIBUTE, 'int' , None, None,
[(0, 65535)], [],
''' Age in hours
''',
'age',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('host-address', REFERENCE_LIST, 'HostAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs.Vrf.Hosts.Host.HostAddress',
[], [],
''' Host address list
''',
'host_address',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('host-alias-list', REFERENCE_CLASS, 'HostAliasList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs.Vrf.Hosts.Host.HostAliasList',
[], [],
''' Host alias
''',
'host_alias_list',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs.Vrf.Hosts' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs.Vrf.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs.Vrf.Hosts.Host',
[], [],
''' IP domain-name, lookup style, nameservers for
specific host
''',
'host',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs.Vrf' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-ip-domain-oper', True),
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs.Vrf.Hosts',
[], [],
''' List of domain hosts
''',
'hosts',
'Cisco-IOS-XR-ip-domain-oper', False),
_MetaInfoClassMember('server', REFERENCE_CLASS, 'Server' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs.Vrf.Server',
[], [],
''' Domain server data
''',
'server',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain.Vrfs' : {
'meta_info' : _MetaInfoClass('IpDomain.Vrfs',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs.Vrf',
[], [],
''' VRF instance
''',
'vrf',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'vrfs',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'IpDomain' : {
'meta_info' : _MetaInfoClass('IpDomain',
False,
[
_MetaInfoClassMember('vrfs', REFERENCE_CLASS, 'Vrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper', 'IpDomain.Vrfs',
[], [],
''' List of VRFs
''',
'vrfs',
'Cisco-IOS-XR-ip-domain-oper', False),
],
'Cisco-IOS-XR-ip-domain-oper',
'ip-domain',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'Ipv4Identity' : {
'meta_info' : _MetaInfoClass('Ipv4Identity',
False,
[
],
'Cisco-IOS-XR-ip-domain-oper',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
'Ipv6Identity' : {
'meta_info' : _MetaInfoClass('Ipv6Identity',
False,
[
],
'Cisco-IOS-XR-ip-domain-oper',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-ip-domain-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_domain_oper'
),
},
}
_meta_table['IpDomain.Vrfs.Vrf.Server.ServerAddress']['meta_info'].parent =_meta_table['IpDomain.Vrfs.Vrf.Server']['meta_info']
_meta_table['IpDomain.Vrfs.Vrf.Hosts.Host.HostAliasList']['meta_info'].parent =_meta_table['IpDomain.Vrfs.Vrf.Hosts.Host']['meta_info']
_meta_table['IpDomain.Vrfs.Vrf.Hosts.Host.HostAddress']['meta_info'].parent =_meta_table['IpDomain.Vrfs.Vrf.Hosts.Host']['meta_info']
_meta_table['IpDomain.Vrfs.Vrf.Hosts.Host']['meta_info'].parent =_meta_table['IpDomain.Vrfs.Vrf.Hosts']['meta_info']
_meta_table['IpDomain.Vrfs.Vrf.Server']['meta_info'].parent =_meta_table['IpDomain.Vrfs.Vrf']['meta_info']
_meta_table['IpDomain.Vrfs.Vrf.Hosts']['meta_info'].parent =_meta_table['IpDomain.Vrfs.Vrf']['meta_info']
_meta_table['IpDomain.Vrfs.Vrf']['meta_info'].parent =_meta_table['IpDomain.Vrfs']['meta_info']
_meta_table['IpDomain.Vrfs']['meta_info'].parent =_meta_table['IpDomain']['meta_info']
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a Householder transformation."""
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorHouseholder",]
@tf_export("linalg.LinearOperatorHouseholder")
@linear_operator.make_composite_tensor
class LinearOperatorHouseholder(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] of Householder transformations.

  This operator acts like a [batch] of householder reflections with shape
  `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `N x N` matrix. This matrix `A` is not materialized, but for
  purposes of broadcasting this shape will be relevant.

  `LinearOperatorHouseholder` is initialized with a (batch) vector.

  A Householder reflection, defined via a vector `v`, which reflects points
  in `R^n` about the hyperplane orthogonal to `v` and through the origin.

  ```python
  # Create a 2 x 2 householder transform.
  vec = [1 / np.sqrt(2), 1. / np.sqrt(2)]
  operator = LinearOperatorHouseholder(vec)

  operator.to_dense()
  ==> [[0., -1.]
       [-1., -0.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor
  ```

  #### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [N, N],  with b >= 0
  x.shape =   [C1,...,Cc] + [N, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               reflection_axis,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorHouseholder"):
    r"""Initialize a `LinearOperatorHouseholder`.

    Args:
      reflection_axis:  Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
        The vector defining the hyperplane to reflect about.
        Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
        `complex128`.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.  This is autoset to true
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
        This is autoset to false.
      is_square:  Expect that this operator acts like square [batch] matrices.
        This is autoset to true.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError:  `is_self_adjoint` is not `True`, `is_positive_definite` is
        not `False` or `is_square` is not `True`.
    """
    parameters = dict(
        reflection_axis=reflection_axis,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name
    )
    with ops.name_scope(name, values=[reflection_axis]):
      # Keep a non-ref tensor so variables can still be tracked/updated.
      self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(
          reflection_axis, name="reflection_axis")
      self._check_reflection_axis(self._reflection_axis)

      # Check and auto-set hints: a Householder reflection is always
      # self-adjoint, never positive definite, and always square.
      if is_self_adjoint is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError("A Householder operator is always self adjoint.")
      else:
        is_self_adjoint = True

      if is_positive_definite is True:  # pylint:disable=g-bool-id-comparison
        raise ValueError(
            "A Householder operator is always non-positive definite.")
      else:
        is_positive_definite = False

      if is_square is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError("A Householder operator is always square.")
      is_square = True

      super(LinearOperatorHouseholder, self).__init__(
          dtype=self._reflection_axis.dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  def _check_reflection_axis(self, reflection_axis):
    """Static check of reflection_axis."""
    if (reflection_axis.shape.ndims is not None and
        reflection_axis.shape.ndims < 1):
      raise ValueError(
          "Argument reflection_axis must have at least 1 dimension.  "
          "Found: %s" % reflection_axis)

  def _shape(self):
    # If d_shape = [5, 3], we return [5, 3, 3].
    d_shape = self._reflection_axis.shape
    return d_shape.concatenate(d_shape[-1:])

  def _shape_tensor(self):
    # Dynamic analogue of _shape: append the vector length N to get [..., N, N].
    d_shape = array_ops.shape(self._reflection_axis)
    k = d_shape[-1]
    return array_ops.concat((d_shape, [k]), 0)

  def _assert_non_singular(self):
    # |det| = 1 for a reflection, so there is nothing to check at runtime.
    return control_flow_ops.no_op("assert_non_singular")

  def _assert_positive_definite(self):
    raise errors.InvalidArgumentError(
        node_def=None, op=None, message="Householder operators are always "
        "non-positive definite.")

  def _assert_self_adjoint(self):
    # Self-adjointness holds by construction; no runtime check required.
    return control_flow_ops.no_op("assert_self_adjoint")

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # Given a vector `v`, we would like to reflect `x` about the hyperplane
    # orthogonal to `v` going through the origin.  We first project `x` to `v`
    # to get v * dot(v, x) / dot(v, v).  After we project, we can reflect the
    # projection about the hyperplane by flipping sign to get
    # -v * dot(v, x) / dot(v, v).  Finally, we can add back the component
    # that is orthogonal to v.  This is invariant under reflection, since the
    # whole hyperplane is invariant.  This component is equal to x - v * dot(v,
    # x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v)
    # for the reflection.

    # Note that because this is a reflection, it lies in O(n) (for real vector
    # spaces) or U(n) (for complex vector spaces), and thus is its own adjoint.
    reflection_axis = ops.convert_to_tensor_v2_with_dispatch(
        self.reflection_axis)
    x = linalg.adjoint(x) if adjoint_arg else x
    normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
    mat = normalized_axis[..., array_ops.newaxis]
    x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True)

    return x - 2 * mat * x_dot_normalized_v

  def _trace(self):
    # We have (n - 1) +1 eigenvalues and a single -1 eigenvalue,
    # so trace = (n - 1) - 1 = n - 2, broadcast over the batch shape.
    shape = self.shape_tensor()
    return math_ops.cast(
        self._domain_dimension_tensor(shape=shape) - 2,
        self.dtype) * array_ops.ones(
            shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype)

  def _determinant(self):
    # For householder transformations, the determinant is -1.
    return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)  # pylint: disable=invalid-unary-operand-type

  def _log_abs_determinant(self):
    # Orthogonal matrix -> log|Q| = 0.
    return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # A householder reflection is involutory (its own inverse), so solving
    # is the same as applying the operator: just do a matmul.
    return self._matmul(rhs, adjoint, adjoint_arg)

  def _to_dense(self):
    # Materialize I - 2 * v v^H for the (normalized) reflection axis v.
    reflection_axis = ops.convert_to_tensor_v2_with_dispatch(
        self.reflection_axis)
    normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
    mat = normalized_axis[..., array_ops.newaxis]
    matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
    return array_ops.matrix_set_diag(
        matrix, 1. + array_ops.matrix_diag_part(matrix))

  def _diag_part(self):
    # Diagonal of I - 2 * v v^H is 1 - 2 * |v_i|^2.
    reflection_axis = ops.convert_to_tensor_v2_with_dispatch(
        self.reflection_axis)
    normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
    return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)

  def _eigvals(self):
    # We have (n - 1) +1 eigenvalues and a single -1 eigenvalue.
    result_shape = array_ops.shape(self.reflection_axis)
    n = result_shape[-1]
    ones_shape = array_ops.concat([result_shape[:-1], [n - 1]], axis=-1)
    neg_shape = array_ops.concat([result_shape[:-1], [1]], axis=-1)
    eigvals = array_ops.ones(shape=ones_shape, dtype=self.dtype)
    eigvals = array_ops.concat(
        [-array_ops.ones(shape=neg_shape, dtype=self.dtype), eigvals], axis=-1)  # pylint: disable=invalid-unary-operand-type
    return eigvals

  def _cond(self):
    # Householder matrices are orthogonal reflections, so every singular
    # value is 1 and the condition number is 1.
    return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)

  @property
  def reflection_axis(self):
    return self._reflection_axis

  @property
  def _composite_tensor_fields(self):
    return ("reflection_axis",)
|
|
import argparse
import re
def process_qasm(fname):
    """Instrument a Scaffold/QASM header file into a printf-based C program.

    Reads *fname* (a ``.qasmh`` file), and writes:

    * ``<base>_qasm.scaffold`` — C instrumentation stubs (one ``qg_*``
      function per gate that prints the gate name and operands) followed by
      the translated program body found between the ``QASM Generation Pass:``
      and ``End of QASM generation`` markers.
    * ``fdecl.out`` — the collected qubit and cbit declarations.

    Fixes vs. previous version: the output-name pattern is a raw string
    (``'\\.qasmh$'`` was an invalid escape sequence), and all three file
    handles are managed with ``with`` so they are closed on any error.
    """
    qgates = ['H', 'X', 'CNOT', 'Y', 'Z', 'S', 'T', 'Tdag', 'Sdag', 'Rz',
              'PrepX', 'PrepZ', 'MeasX', 'MeasZ', 'Toffoli', 'Fredkin']
    qgates_1 = ['H', 'X', 'Y', 'Z', 'S', 'T', 'Tdag']  # 1-qubit gates
    qgates_1a = ['Sdag']                               # emitted as S^3
    qgates_2 = ['CNOT']                                # 2-qubit gates
    qgates_3 = ['Toffoli', 'Fredkin']                  # 3-qubit gates
    qgates_4 = ['PrepX', 'PrepZ']                      # prep: (qubit, init bit)
    qgates_5 = ['MeasX', 'MeasZ']                      # measurements
    qgates_6 = ['Rz']                                  # rotation: (qubit, angle)
    qgates_7 = ['afree']                               # ancilla free

    gateNames = {
        'H': 'H',
        'X': 'X',
        'Y': 'Y',
        'Z': 'Z',
        'S': 'S',
        'T': 'T',
        'Sdag': 'Sdag',
        'Tdag': 'Tdag',
        'PrepX': 'PrepX',  # 'Px',
        'PrepZ': 'PrepZ',  # 'Pz',
        'MeasZ': 'MeasZ',  # 'Mz',
        'MeasX': 'MeasX',  # 'Mx',
        'Rz': 'Rz',
        'CNOT': 'CNOT',    # 'CX',
        'Toffoli': 'Tof',
        'Fredkin': 'Fredkin',
        'afree': 'afree'
    }

    pattern_qbit_decl = re.compile(r"\s*\bqbit\b\s+(?P<qbit_var>\w+)\s*\[\s*(?P<array_size>\d+)\s*\]\s*;")
    pattern_cbit_decl = re.compile(r"\s*\bcbit\b\s+(?P<qbit_var>\w+)\s*\[\s*(?P<array_size>\d+)\s*\]\s*;")
    pattern_qg = re.compile(r"\s*((\w+|\w+\[(.*?)\])\s*\=)*\s*(?P<func_name>\w+)\s*\(\s*(?P<array_size>(.*?))\s*\)\s*;")
    pattern_qbit_arg = re.compile(r"(.*?)\((.*?)\bqbit\b\s*(.*?)\)(.*?)")
    pattern_meas = re.compile(r"\s*(?P<func_ret>(\w+|\w+\[(.*?)\])\s*\=)*\s*(\bqg_MeasX|qg_MeasZ\b)\s*\(\s*(?P<array_size>(.*?))\s*\)\s*;")
    pattern_main = re.compile(r"\s*(\bvoid|module\b)\s+(\bmain|main1\b)\s*\((.*?)\)\s*(\{)*\s*")
    pattern_comment = re.compile(r"\s*//--//--(.*?)--//--//\s*")

    # Raw string: '\.' in a plain string is an invalid escape sequence.
    fout_name = re.sub(r'\.qasmh$', '_qasm.scaffold', fname)
    setQbitDecl = []
    setCbitDecl = []

    with open(fout_name, 'w') as fout:
        fout.write('#include<stdio.h>\n')

        # Emit one instrumentation stub per gate family.
        for q in qgates_1:
            instFnName = 'qg_' + q
            fstr = 'void '+instFnName+'(char* a){ printf("' + gateNames[q] + ' %s\\n",a); }\n'
            fout.write(fstr)
        fout.write('\n')

        for q in qgates_1a:  # Sdag = S^3
            instFnName = 'qg_' + q
            fstr = 'void '+instFnName+'(char* a){ printf("S %s\\n",a); printf("S %s\\n",a); printf("S %s\\n",a); }\n'
            fout.write(fstr)
        fout.write('\n')

        for q in qgates_2:  # CNOT => CX (target,control)
            instFnName = 'qg_' + q
            fstr = 'void '+instFnName+'(char* a, char* b){ printf("' + gateNames[q] + ' %s,%s\\n",a,b); }\n'
            fout.write(fstr)
        fout.write('\n')

        for q in qgates_3:
            instFnName = 'qg_' + q
            fstr = 'void '+instFnName+'(char* a, char* b, char* c){ printf("' + gateNames[q] + ' %s,%s,%s\\n",a,b,c); }\n'
            fout.write(fstr)
        fout.write('\n')

        for q in qgates_4:  # PrepZ, PrepX: optionally follow with an X for |1>
            instFnName = 'qg_' + q
            fstr = 'void '+instFnName+'(char* a, int i){ printf("' + gateNames[q] + ' %s\\n",a); '
            fout.write(fstr)
            fstr = 'if(i==1){ printf("X %s\\n",a); } }\n'
            fout.write(fstr)
        fout.write('\n')

        for q in qgates_5:  # MeasX, MeasZ
            instFnName = 'qg_' + q
            fstr = 'void '+instFnName+'(char* a){ printf("' + gateNames[q] + ' %s\\n",a); }\n'
            fout.write(fstr)
        fout.write('\n')

        for q in qgates_6:
            instFnName = 'qg_' + q
            fstr = 'void '+instFnName+'(char* a, double b){ printf("' + gateNames[q] + ' %s,%f\\n",a,b); }\n'
            fout.write(fstr)

        for q in qgates_7:
            instFnName = q
            fstr = 'void '+instFnName+'(char** a, int b ){ for(int i = 0; i < b; i++){ printf("' + gateNames[q] + ' %s\\n",(*a)); a++; }}\n'
            fout.write(fstr)
        fout.write('\n')

        with open(fname, 'r') as f:
            # Ignore contents until the QASM Generation Pass marker.
            b = 'Dummy Line'
            while b != '':
                if b.find('QASM Generation Pass:') != -1:
                    break
                b = f.readline()
            b = f.readline()

            inMainFunc = False
            while b != '':
                if b.find('End of QASM generation') != -1:
                    break
                # Rewrite the main signature and remember we are inside main.
                m = re.match(pattern_main, b)
                if m:
                    inMainFunc = True
                    b = re.sub(r"\bvoid|module\b", "int ", b)
                # qbit declarations become arrays of C string literals.
                m = re.match(pattern_qbit_decl, b)
                if m:
                    numElem = int(m.group('array_size'))
                    var = m.group('qbit_var')
                    addAlphabet = ''
                    if not inMainFunc:
                        addAlphabet = 'a'  # mark ancilla declarations
                    subStr = "char* " + m.group('qbit_var') + '[' + m.group('array_size') + '] = {'
                    fout.write(subStr)
                    for i in range(numElem - 1):
                        varName = var + str(i) + addAlphabet
                        tmp = '"' + varName + '",'
                        if varName not in setQbitDecl:
                            setQbitDecl.append(varName)
                        fout.write(tmp)
                    varName = var + str(numElem - 1) + addAlphabet
                    tmp = '"' + varName + '"'
                    if varName not in setQbitDecl:
                        setQbitDecl.append(varName)
                    fout.write(tmp)
                    fout.write('};\n')
                else:
                    m = re.match(pattern_qg, b)
                    if m:  # quantum gate call -> call the qg_ stub
                        qstr = m.group('func_name')
                        if qstr in qgates:
                            rstr = 'qg_' + qstr
                            mystr = b.replace(qstr, rstr)
                            # Measurement gates: drop the return-value assignment.
                            m1 = re.match(pattern_meas, mystr)
                            if m1:
                                retStr = m1.group('func_ret')
                                if retStr:
                                    mystr = mystr.replace(retStr, '')
                            fout.write(mystr)
                        else:
                            fout.write(b)
                    else:
                        # Substitute qbit as char* in module definitions.
                        m = re.match(pattern_qbit_arg, b)
                        if m:
                            mystr = re.sub(r"\bqbit\b", "char* ", b)
                            fout.write(mystr)
                        else:
                            m = re.match(pattern_cbit_decl, b)
                            if m:
                                numElem = int(m.group('array_size'))
                                var = m.group('qbit_var')
                                subStr = "char* " + m.group('qbit_var') + '[' + m.group('array_size') + '] = {'
                                fout.write(subStr)
                                for i in range(numElem - 1):
                                    tmp = '"' + var + str(i) + '",'
                                    setCbitDecl.append(var + str(i))
                                    fout.write(tmp)
                                tmp = '"' + var + str(numElem - 1) + '"'
                                setCbitDecl.append(var + str(numElem - 1))
                                fout.write(tmp)
                                fout.write('};\n')
                            else:
                                m = re.match(pattern_comment, b)
                                if m:
                                    # Pass structural comments through as printf.
                                    subStr = 'printf("' + b.rstrip('\n') + '\\n");'
                                    fout.write(subStr)
                                else:
                                    # Did not match any pattern: copy verbatim.
                                    fout.write(b)
                b = f.readline()

    # Write collected qbit and cbit declarations to file.
    with open("fdecl.out", 'w') as fdecl:
        for q in setQbitDecl:
            fdecl.write('qubit ' + q + '\n')
        for q in setCbitDecl:
            fdecl.write('cbit ' + q + '\n')
if __name__ == '__main__':
    # Guard the CLI entry point so importing this module does not parse
    # sys.argv or run the conversion as a side effect.
    parser = argparse.ArgumentParser(
        description='Convert QASM code into flattened QASM code')
    parser.add_argument("input")
    args = parser.parse_args()
    process_qasm(args.input)
|
|
"""Test system log component."""
import logging
from unittest.mock import MagicMock, patch
from homeassistant.core import callback
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import system_log
# Dedicated logger so the tests only capture records they emit themselves.
_LOGGER = logging.getLogger('test_logger')

# Minimal component config: keep only the 2 most recent entries so the
# rotation/dedup tests can exercise overflow with very few messages.
BASIC_CONFIG = {
    'system_log': {
        'max_entries': 2,
    }
}
async def get_error_log(hass, hass_client, expected_count):
    """Fetch all entries from system_log via the API."""
    api_client = await hass_client()
    response = await api_client.get('/api/error/all')
    assert response.status == 200

    entries = await response.json()
    # Every caller wants the count checked, so assert it here once.
    assert len(entries) == expected_count
    return entries
def _generate_and_log_exception(exception, log):
    # Raise an Exception carrying *exception* as its message so that
    # _LOGGER.exception records *log* together with a real traceback.
    try:
        raise Exception(exception)
    except:  # noqa: E722 pylint: disable=bare-except
        _LOGGER.exception(log)
def assert_log(log, exception, message, level):
    """Assert that specified values are in a specific log entry."""
    # Exception text is a substring match (tracebacks vary); the rest exact.
    assert exception in log['exception']
    assert log['message'] == message
    assert log['level'] == level
    assert 'timestamp' in log
def get_frame(name):
    """Build a minimal fake traceback frame tuple for *name*."""
    # Only the filename slot matters to the code under test.
    return tuple([name] + [None] * 3)
async def test_normal_logs(hass, hass_client):
    """Test that debug and info are not logged."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.debug('debug')
    _LOGGER.info('info')

    # Assert done by get_error_log: neither record should be captured.
    await get_error_log(hass, hass_client, 0)


async def test_exception(hass, hass_client):
    """Test that exceptions are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _generate_and_log_exception('exception message', 'log message')
    # Newest entry is first in the API response.
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert_log(log, 'exception message', 'log message', 'ERROR')


async def test_warning(hass, hass_client):
    """Test that warning are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.warning('warning message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    # No traceback attached, so the exception field is expected empty.
    assert_log(log, '', 'warning message', 'WARNING')


async def test_error(hass, hass_client):
    """Test that errors are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error('error message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert_log(log, '', 'error message', 'ERROR')
async def test_config_not_fire_event(hass):
    """Test that errors are not posted as events with default config."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    events = []

    @callback
    def event_listener(event):
        """Listen to events of type system_log_event."""
        events.append(event)

    hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
    _LOGGER.error('error message')
    await hass.async_block_till_done()
    # fire_event defaults to off, so no bus event should be emitted.
    assert len(events) == 0


async def test_error_posted_as_event(hass):
    """Test that error are posted as events."""
    # Same as BASIC_CONFIG but with fire_event enabled.
    await async_setup_component(hass, system_log.DOMAIN, {
        'system_log': {
            'max_entries': 2,
            'fire_event': True,
        }
    })
    events = []

    @callback
    def event_listener(event):
        """Listen to events of type system_log_event."""
        events.append(event)

    hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
    _LOGGER.error('error message')
    await hass.async_block_till_done()
    assert len(events) == 1
    assert_log(events[0].data, '', 'error message', 'ERROR')
async def test_critical(hass, hass_client):
    """Test that critical are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.critical('critical message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert_log(log, '', 'critical message', 'CRITICAL')


async def test_remove_older_logs(hass, hass_client):
    """Test that older logs are rotated out."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error('error message 1')
    _LOGGER.error('error message 2')
    _LOGGER.error('error message 3')
    # max_entries is 2, so message 1 must have been rotated out.
    log = await get_error_log(hass, hass_client, 2)
    assert_log(log[0], '', 'error message 3', 'ERROR')
    assert_log(log[1], '', 'error message 2', 'ERROR')


async def test_dedup_logs(hass, hass_client):
    """Test that duplicate log entries are dedup."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error('error message 1')
    _LOGGER.error('error message 2')
    _LOGGER.error('error message 2')
    _LOGGER.error('error message 3')
    log = await get_error_log(hass, hass_client, 2)
    assert_log(log[0], '', 'error message 3', 'ERROR')
    # The repeated message is stored once with an incremented count.
    assert log[1]["count"] == 2
    assert_log(log[1], '', 'error message 2', 'ERROR')


async def test_clear_logs(hass, hass_client):
    """Test that the log can be cleared via a service call."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error('error message')
    hass.async_add_job(
        hass.services.async_call(
            system_log.DOMAIN, system_log.SERVICE_CLEAR, {}))
    await hass.async_block_till_done()

    # Assert done by get_error_log: the clear service emptied the buffer.
    await get_error_log(hass, hass_client, 0)
async def test_write_log(hass):
    """Test that error propagates to logger."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    logger = MagicMock()
    with patch('logging.getLogger', return_value=logger) as mock_logging:
        hass.async_add_job(
            hass.services.async_call(
                system_log.DOMAIN, system_log.SERVICE_WRITE,
                {'message': 'test_message'}))
        await hass.async_block_till_done()
    # Default logger name and default level ('error') are used.
    mock_logging.assert_called_once_with(
        'homeassistant.components.system_log.external')
    assert logger.method_calls[0] == ('error', ('test_message',))


async def test_write_choose_logger(hass):
    """Test that correct logger is chosen."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    with patch('logging.getLogger') as mock_logging:
        hass.async_add_job(
            hass.services.async_call(
                system_log.DOMAIN, system_log.SERVICE_WRITE,
                {'message': 'test_message',
                 'logger': 'myLogger'}))
        await hass.async_block_till_done()
    # The explicit 'logger' field overrides the default external logger.
    mock_logging.assert_called_once_with(
        'myLogger')


async def test_write_choose_level(hass):
    """Test that correct logger is chosen."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    logger = MagicMock()
    with patch('logging.getLogger', return_value=logger):
        hass.async_add_job(
            hass.services.async_call(
                system_log.DOMAIN, system_log.SERVICE_WRITE,
                {'message': 'test_message',
                 'level': 'debug'}))
        await hass.async_block_till_done()
    # The explicit 'level' field selects the logger method to call.
    assert logger.method_calls[0] == ('debug', ('test_message',))
async def test_unknown_path(hass, hass_client):
    """Test error logged from unknown path."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    # Force the logger to report a caller path the component cannot classify.
    _LOGGER.findCaller = MagicMock(
        return_value=('unknown_path', 0, None, None))
    _LOGGER.error('error message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert log['source'] == 'unknown_path'


def log_error_from_test_path(path):
    """Log error while mocking the path."""
    call_path = 'internal_path.py'
    # Fake both the immediate caller and the surrounding stack so the
    # component resolves *path* as the source of the error.
    with patch.object(_LOGGER,
                      'findCaller',
                      MagicMock(return_value=(call_path, 0, None, None))):
        with patch('traceback.extract_stack',
                   MagicMock(return_value=[
                       get_frame('main_path/main.py'),
                       get_frame(path),
                       get_frame(call_path),
                       get_frame('venv_path/logging/log.py')])):
            _LOGGER.error('error message')
async def test_homeassistant_path(hass, hass_client):
    """Paths under the homeassistant tree are reported relative to it."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    with patch('homeassistant.components.system_log.HOMEASSISTANT_PATH',
               new=['venv_path/homeassistant']):
        log_error_from_test_path(
            'venv_path/homeassistant/component/component.py')
        entry = (await get_error_log(hass, hass_client, 1))[0]
    assert entry['source'] == 'component/component.py'
async def test_config_path(hass, hass_client):
    """Paths under the config dir are reported relative to it."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    with patch.object(hass.config, 'config_dir', new='config'):
        log_error_from_test_path('config/custom_component/test.py')
        entry = (await get_error_log(hass, hass_client, 1))[0]
    assert entry['source'] == 'custom_component/test.py'
async def test_netdisco_path(hass, hass_client):
    """Errors from inside the netdisco package keep only the basename."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    fake_netdisco = MagicMock(__path__=['venv_path/netdisco'])
    with patch.dict('sys.modules', netdisco=fake_netdisco):
        log_error_from_test_path('venv_path/netdisco/disco_component.py')
        entry = (await get_error_log(hass, hass_client, 1))[0]
    assert entry['source'] == 'disco_component.py'
|
|
# Copyright 2015 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared function between different SSD implementations.
"""
import numpy as np
import tensorflow as tf
import tf_extended as tfe
# =========================================================================== #
# TensorFlow implementation of boxes SSD encoding / decoding.
# =========================================================================== #
def tf_ssd_bboxes_encode_layer(labels,
                               bboxes,
                               anchors_layer,
                               num_classes,
                               no_annotation_label,
                               ignore_threshold=0.5,
                               prior_scaling=[0.1, 0.1, 0.2, 0.2],
                               dtype=tf.float32):
    """Encode groundtruth labels and bounding boxes using SSD anchors from
    one layer.
    Arguments:
      labels: 1D Tensor(int64) containing groundtruth labels;
      bboxes: Nx4 Tensor(float) with bboxes relative coordinates
        (ymin, xmin, ymax, xmax);
      anchors_layer: layer anchors as a (yref, xref, href, wref) tuple of
        Numpy arrays;
      num_classes: labels >= num_classes are never assigned to anchors;
      no_annotation_label: label marking 'no annotation' regions; anchors
        sufficiently covered by such a box get score -1 (ignored);
      ignore_threshold: intersection fraction above which an anchor counts
        as covered by a no-annotation box;
      prior_scaling: scaling of encoded coordinates.
    Return:
      (target_labels, target_localizations, target_scores): Target Tensors.
    """
    # Anchors coordinates and volume.
    yref, xref, href, wref = anchors_layer
    ymin = yref - href / 2.
    xmin = xref - wref / 2.
    ymax = yref + href / 2.
    xmax = xref + wref / 2.
    vol_anchors = (xmax - xmin) * (ymax - ymin)
    # Initialize tensors: one slot per anchor (feat_h x feat_w x n_anchors).
    shape = (yref.shape[0], yref.shape[1], href.size)
    feat_labels = tf.zeros(shape, dtype=tf.int64)
    feat_scores = tf.zeros(shape, dtype=dtype)
    feat_ymin = tf.zeros(shape, dtype=dtype)
    feat_xmin = tf.zeros(shape, dtype=dtype)
    feat_ymax = tf.ones(shape, dtype=dtype)
    feat_xmax = tf.ones(shape, dtype=dtype)
    def jaccard_with_anchors(bbox):
        """Compute jaccard score between a box and the anchors.
        """
        int_ymin = tf.maximum(ymin, bbox[0])
        int_xmin = tf.maximum(xmin, bbox[1])
        int_ymax = tf.minimum(ymax, bbox[2])
        int_xmax = tf.minimum(xmax, bbox[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        union_vol = vol_anchors - inter_vol \
            + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
        jaccard = tf.div(inter_vol, union_vol)
        return jaccard
    def intersection_with_anchors(bbox):
        """Compute fraction of each anchor's area covered by the box.
        """
        int_ymin = tf.maximum(ymin, bbox[0])
        int_xmin = tf.maximum(xmin, bbox[1])
        int_ymax = tf.minimum(ymax, bbox[2])
        int_xmax = tf.minimum(xmax, bbox[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        inter_vol = h * w
        scores = tf.div(inter_vol, vol_anchors)
        return scores
    def condition(i, feat_labels, feat_scores,
                  feat_ymin, feat_xmin, feat_ymax, feat_xmax):
        """Condition: loop while i < number of groundtruth labels.
        """
        r = tf.less(i, tf.shape(labels))
        return r[0]
    def body(i, feat_labels, feat_scores,
             feat_ymin, feat_xmin, feat_ymax, feat_xmax):
        """Body: update feature labels, scores and bboxes.
        Follow the original SSD paper for that purpose:
          - assign values when jaccard > 0.5;
          - only update if beat the score of other bboxes.
        """
        # Jaccard score.
        label = labels[i]
        bbox = bboxes[i]
        jaccard = jaccard_with_anchors(bbox)
        # Mask: check threshold + scores + no annotations + num_classes.
        mask = tf.greater(jaccard, feat_scores)
        # mask = tf.logical_and(mask, tf.greater(jaccard, matching_threshold))
        # feat_scores > -0.5 keeps anchors previously flagged as
        # 'no annotation' (score -1) from ever being re-assigned.
        mask = tf.logical_and(mask, feat_scores > -0.5)
        mask = tf.logical_and(mask, label < num_classes)
        imask = tf.cast(mask, tf.int64)
        fmask = tf.cast(mask, dtype)
        # Update values using mask (element-wise select).
        feat_labels = imask * label + (1 - imask) * feat_labels
        feat_scores = tf.where(mask, jaccard, feat_scores)
        feat_ymin = fmask * bbox[0] + (1 - fmask) * feat_ymin
        feat_xmin = fmask * bbox[1] + (1 - fmask) * feat_xmin
        feat_ymax = fmask * bbox[2] + (1 - fmask) * feat_ymax
        feat_xmax = fmask * bbox[3] + (1 - fmask) * feat_xmax
        # Check no annotation label: ignore these anchors...
        interscts = intersection_with_anchors(bbox)
        mask = tf.logical_and(interscts > ignore_threshold,
                              label == no_annotation_label)
        # Replace scores by -1.
        feat_scores = tf.where(mask, -tf.cast(mask, dtype), feat_scores)
        return [i+1, feat_labels, feat_scores,
                feat_ymin, feat_xmin, feat_ymax, feat_xmax]
    # Main loop definition: iterate over every groundtruth box.
    i = 0
    [i, feat_labels, feat_scores,
     feat_ymin, feat_xmin,
     feat_ymax, feat_xmax] = tf.while_loop(condition, body,
                                           [i, feat_labels, feat_scores,
                                            feat_ymin, feat_xmin,
                                            feat_ymax, feat_xmax])
    # Transform to center / size.
    feat_cy = (feat_ymax + feat_ymin) / 2.
    feat_cx = (feat_xmax + feat_xmin) / 2.
    feat_h = feat_ymax - feat_ymin
    feat_w = feat_xmax - feat_xmin
    # Encode features as offsets relative to the anchors.
    feat_cy = (feat_cy - yref) / href / prior_scaling[0]
    feat_cx = (feat_cx - xref) / wref / prior_scaling[1]
    feat_h = tf.log(feat_h / href) / prior_scaling[2]
    feat_w = tf.log(feat_w / wref) / prior_scaling[3]
    # Use SSD ordering: x / y / w / h instead of ours.
    feat_localizations = tf.stack([feat_cx, feat_cy, feat_w, feat_h], axis=-1)
    return feat_labels, feat_localizations, feat_scores
def tf_ssd_bboxes_encode(labels,
                         bboxes,
                         anchors,
                         num_classes,
                         no_annotation_label,
                         ignore_threshold=0.5,
                         prior_scaling=[0.1, 0.1, 0.2, 0.2],
                         dtype=tf.float32,
                         scope='ssd_bboxes_encode'):
    """Encode groundtruth labels and bounding boxes using SSD net anchors.
    Encoding boxes for all feature layers.
    Arguments:
      labels: 1D Tensor(int64) containing groundtruth labels;
      bboxes: Nx4 Tensor(float) with bboxes relative coordinates;
      anchors: List of Numpy array with layer anchors;
      prior_scaling: Scaling of encoded coordinates.
    Return:
      (target_labels, target_localizations, target_scores):
        Each element is a list of target Tensors, one entry per layer.
    """
    with tf.name_scope(scope):
        # Encode each feature layer independently, then regroup the
        # per-layer (labels, localizations, scores) triples.
        layer_targets = []
        for idx, layer_anchors in enumerate(anchors):
            with tf.name_scope('bboxes_encode_block_%i' % idx):
                layer_targets.append(
                    tf_ssd_bboxes_encode_layer(labels, bboxes, layer_anchors,
                                               num_classes,
                                               no_annotation_label,
                                               ignore_threshold,
                                               prior_scaling, dtype))
        target_labels = [t[0] for t in layer_targets]
        target_localizations = [t[1] for t in layer_targets]
        target_scores = [t[2] for t in layer_targets]
        return target_labels, target_localizations, target_scores
def tf_ssd_bboxes_decode_layer(feat_localizations,
                               anchors_layer,
                               prior_scaling=[0.1, 0.1, 0.2, 0.2]):
    """Compute the relative bounding boxes from the layer features and
    reference anchor bounding boxes.
    Arguments:
      feat_localizations: Tensor containing localization features.
      anchors_layer: (yref, xref, href, wref) anchor arrays for the layer.
    Return:
      Tensor Nx4: ymin, xmin, ymax, xmax
    """
    yref, xref, href, wref = anchors_layer
    # Undo the prior scaling: recover box center and size from offsets.
    cx = feat_localizations[:, :, :, :, 0] * wref * prior_scaling[0] + xref
    cy = feat_localizations[:, :, :, :, 1] * href * prior_scaling[1] + yref
    w = wref * tf.exp(feat_localizations[:, :, :, :, 2] * prior_scaling[2])
    h = href * tf.exp(feat_localizations[:, :, :, :, 3] * prior_scaling[3])
    # Convert center/size to corner coordinates.
    half_h = h / 2.
    half_w = w / 2.
    return tf.stack([cy - half_h, cx - half_w, cy + half_h, cx + half_w],
                    axis=-1)
def tf_ssd_bboxes_decode(feat_localizations,
                         anchors,
                         prior_scaling=[0.1, 0.1, 0.2, 0.2],
                         scope='ssd_bboxes_decode'):
    """Compute the relative bounding boxes from the SSD net features and
    reference anchors bounding boxes.
    Arguments:
      feat_localizations: List of Tensors containing localization features.
      anchors: List of numpy array containing anchor boxes.
    Return:
      List of Tensors Nx4: ymin, xmin, ymax, xmax
    """
    with tf.name_scope(scope):
        # Decode every feature layer against its matching anchor set.
        return [tf_ssd_bboxes_decode_layer(feats, layer_anchors,
                                           prior_scaling)
                for feats, layer_anchors in zip(feat_localizations, anchors)]
# =========================================================================== #
# SSD boxes selection.
# =========================================================================== #
def tf_ssd_bboxes_select_layer(predictions_layer, localizations_layer,
                               select_threshold=None,
                               num_classes=21,
                               ignore_class=0,
                               scope=None):
    """Extract classes, scores and bounding boxes from features in one layer.
    Batch-compatible: inputs are supposed to have batch-type shapes.
    Args:
      predictions_layer: A SSD prediction layer;
      localizations_layer: A SSD localization layer;
      select_threshold: Classification threshold for selecting a box. All boxes
        under the threshold are set to 'zero'. If None, no threshold applied.
    Return:
      d_scores, d_bboxes: Dictionary of scores and bboxes Tensors of
        size Batches X N x 1 | 4. Each key corresponding to a class.
    """
    if select_threshold is None:
        select_threshold = 0.0
    with tf.name_scope(scope, 'ssd_bboxes_select_layer',
                       [predictions_layer, localizations_layer]):
        # Flatten spatial / anchor dims: Batches x N x N_labels | 4.
        pred_shape = tfe.get_shape(predictions_layer)
        predictions_layer = tf.reshape(
            predictions_layer, tf.stack([pred_shape[0], -1, pred_shape[-1]]))
        loc_shape = tfe.get_shape(localizations_layer)
        localizations_layer = tf.reshape(
            localizations_layer, tf.stack([loc_shape[0], -1, loc_shape[-1]]))
        d_scores = {}
        d_bboxes = {}
        for c in range(num_classes):
            if c == ignore_class:
                continue
            # Zero out boxes scoring below the threshold for this class.
            class_scores = predictions_layer[:, :, c]
            keep = tf.cast(tf.greater_equal(class_scores, select_threshold),
                           class_scores.dtype)
            d_scores[c] = class_scores * keep
            d_bboxes[c] = localizations_layer * tf.expand_dims(keep, axis=-1)
        return d_scores, d_bboxes
def tf_ssd_bboxes_select(predictions_net, localizations_net,
                         select_threshold=None,
                         num_classes=21,
                         ignore_class=0,
                         scope=None):
    """Extract classes, scores and bounding boxes from network output layers.
    Batch-compatible: inputs are supposed to have batch-type shapes.
    Args:
      predictions_net: List of SSD prediction layers;
      localizations_net: List of localization layers;
      select_threshold: Classification threshold for selecting a box. All boxes
        under the threshold are set to 'zero'. If None, no threshold applied.
    Return:
      d_scores, d_bboxes: Dictionary of scores and bboxes Tensors of
        size Batches X N x 1 | 4. Each key corresponding to a class.
    """
    with tf.name_scope(scope, 'ssd_bboxes_select',
                       [predictions_net, localizations_net]):
        # Select per layer, then concatenate the layers class by class.
        per_layer = [tf_ssd_bboxes_select_layer(pred, loc,
                                                select_threshold,
                                                num_classes,
                                                ignore_class)
                     for pred, loc in zip(predictions_net, localizations_net)]
        l_scores = [scores for scores, _ in per_layer]
        l_bboxes = [bboxes for _, bboxes in per_layer]
        d_scores = {}
        d_bboxes = {}
        for c in l_scores[0]:
            d_scores[c] = tf.concat([s[c] for s in l_scores], axis=1)
            d_bboxes[c] = tf.concat([b[c] for b in l_bboxes], axis=1)
        return d_scores, d_bboxes
def tf_ssd_bboxes_select_layer_all_classes(predictions_layer, localizations_layer,
                                           select_threshold=None):
    """Extract classes, scores and bounding boxes from features in one layer.
    Batch-compatible: inputs are supposed to have batch-type shapes.
    Args:
      predictions_layer: A SSD prediction layer;
      localizations_layer: A SSD localization layer;
      select_threshold: Classification threshold for selecting a box. If None,
        select boxes whose classification score is higher than 'no class'.
    Return:
      classes, scores, bboxes: Input Tensors.
    """
    # Flatten spatial / anchor dims: Batches x N x N_labels | 4.
    pred_shape = tfe.get_shape(predictions_layer)
    predictions_layer = tf.reshape(
        predictions_layer, tf.stack([pred_shape[0], -1, pred_shape[-1]]))
    loc_shape = tfe.get_shape(localizations_layer)
    localizations_layer = tf.reshape(
        localizations_layer, tf.stack([loc_shape[0], -1, loc_shape[-1]]))
    if select_threshold is None or select_threshold == 0:
        # Winner-take-all over every class; background (class 0) zeroed out.
        classes = tf.argmax(predictions_layer, axis=2)
        scores = tf.reduce_max(predictions_layer, axis=2)
        scores = scores * tf.cast(classes > 0, scores.dtype)
    else:
        # Drop the background column, then threshold the best foreground.
        fg_predictions = predictions_layer[:, :, 1:]
        classes = tf.argmax(fg_predictions, axis=2) + 1
        scores = tf.reduce_max(fg_predictions, axis=2)
        keep = tf.greater(scores, select_threshold)
        classes = classes * tf.cast(keep, classes.dtype)
        scores = scores * tf.cast(keep, scores.dtype)
    # Localization layer is assumed to be decoded already.
    return classes, scores, localizations_layer
def tf_ssd_bboxes_select_all_classes(predictions_net, localizations_net,
                                     select_threshold=None,
                                     scope=None):
    """Extract classes, scores and bounding boxes from network output layers.
    Batch-compatible: inputs are supposed to have batch-type shapes.
    Args:
      predictions_net: List of SSD prediction layers;
      localizations_net: List of localization layers;
      select_threshold: Classification threshold for selecting a box. If None,
        select boxes whose classification score is higher than 'no class'.
    Return:
      classes, scores, bboxes: Tensors.
    """
    with tf.name_scope(scope, 'ssd_bboxes_select',
                       [predictions_net, localizations_net]):
        l_classes = []
        l_scores = []
        l_bboxes = []
        for pred_layer, loc_layer in zip(predictions_net, localizations_net):
            layer_classes, layer_scores, layer_bboxes = \
                tf_ssd_bboxes_select_layer_all_classes(pred_layer,
                                                       loc_layer,
                                                       select_threshold)
            l_classes.append(layer_classes)
            l_scores.append(layer_scores)
            l_bboxes.append(layer_bboxes)
        # Merge all layers along the anchor dimension.
        classes = tf.concat(l_classes, axis=1)
        scores = tf.concat(l_scores, axis=1)
        bboxes = tf.concat(l_bboxes, axis=1)
        return classes, scores, bboxes
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import glanceclient.v1.images
import routes
import webob
import webob.dec
import webob.request
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack.compute import limits
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.image.glance
from nova.network import api as network_api
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import quota
from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs
from nova import utils
from nova import wsgi
# Quota engine handle stubbed out by stub_out_instance_quota().
QUOTAS = quota.QUOTAS
# Default instance UUID used by stub_instance().
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
# Cache of generated UUIDs keyed by token; see get_fake_uuid().
FAKE_UUIDS = {}
class Context(object):
    """Empty placeholder object usable as a stand-in context."""
    pass
class FakeRouter(wsgi.Router):
    """Router stub that answers every request with an empty 200 response."""

    def __init__(self, ext_mgr=None):
        # Intentionally skip real route setup; __call__ answers directly.
        pass
    @webob.dec.wsgify
    def __call__(self, req):
        res = webob.Response()
        res.status = '200'
        # Marker header so callers can tell this router handled the request.
        res.headers['X-Test-Success'] = 'True'
        return res
@webob.dec.wsgify
def fake_wsgi(self, req):
    # Pass-through handler used to replace middleware __call__ methods
    # (see stub_out_rate_limiting): just forward to the wrapped app.
    return self.application
def wsgi_app(inner_app_v2=None, fake_auth_context=None,
             use_no_auth=False, ext_mgr=None, init_only=None):
    """Build a fake WSGI pipeline around the v2 compute API router."""
    if not inner_app_v2:
        inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
    limited = limits.RateLimitingMiddleware(inner_app_v2)
    if use_no_auth:
        api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(limited))
    else:
        ctxt = fake_auth_context
        if ctxt is None:
            ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        api_v2 = openstack_api.FaultWrapper(
            api_auth.InjectContext(ctxt, limited))
    # Mount the app under both legacy and current prefixes.
    mapper = urlmap.URLMap()
    mapper['/v2'] = api_v2
    mapper['/v1.1'] = api_v2
    mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
    return mapper
def wsgi_app_v3(inner_app_v3=None, fake_auth_context=None,
                use_no_auth=False, ext_mgr=None, init_only=None):
    """Build a fake WSGI pipeline around the v3 compute API router."""
    if not inner_app_v3:
        inner_app_v3 = compute.APIRouterV3(init_only)
    limited = limits.RateLimitingMiddleware(inner_app_v3)
    if use_no_auth:
        api_v3 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(limited))
    else:
        ctxt = fake_auth_context
        if ctxt is None:
            ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        api_v3 = openstack_api.FaultWrapper(
            api_auth.InjectContext(ctxt, limited))
    mapper = urlmap.URLMap()
    mapper['/v3'] = api_v3
    # TODO(cyeoh): bp nova-api-core-as-extensions
    # Still need to implement versions for v3 API
    # mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
    return mapper
def stub_out_key_pair_funcs(stubs, have_key_pair=True):
    """Stub the nova.db keypair lookups with canned results."""
    def _all_pairs(context, user_id):
        return [dict(name='key', public_key='public_key')]

    def _one_pair(context, user_id, name):
        if name != 'key':
            raise exc.KeypairNotFound(user_id=user_id, name=name)
        return dict(name='key', public_key='public_key')

    def _no_pairs(context, user_id):
        return []

    if have_key_pair:
        stubs.Set(nova.db, 'key_pair_get_all_by_user', _all_pairs)
        stubs.Set(nova.db, 'key_pair_get', _one_pair)
    else:
        stubs.Set(nova.db, 'key_pair_get_all_by_user', _no_pairs)
def stub_out_rate_limiting(stubs):
    """Turn the rate-limiting middleware into a transparent pass-through."""
    def _passthrough_init(self, app):
        super(limits.RateLimitingMiddleware, self).__init__(app)
        self.application = app

    middleware_cls = nova.api.openstack.compute.limits.RateLimitingMiddleware
    stubs.Set(middleware_cls, '__init__', _passthrough_init)
    stubs.Set(middleware_cls, '__call__', fake_wsgi)
def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
    """Stub QUOTAS.reserve to raise OverQuota when more than `allowed`
    units of `resource` are requested in one reservation."""
    def fake_reserve(context, **deltas):
        requested = deltas.pop(resource, 0)
        if requested <= allowed:
            return
        quotas = dict(instances=1, cores=1, ram=1)
        quotas[resource] = quota
        usages = dict((r, dict(in_use=0, reserved=0))
                      for r in ('instances', 'cores', 'ram'))
        # Fake 90% of the quota already consumed, 10% reserved.
        usages[resource]['in_use'] = quotas[resource] * 0.9 - allowed
        usages[resource]['reserved'] = quotas[resource] * 0.1
        raise exc.OverQuota(overs=[resource], quotas=quotas,
                            usages=usages)
    stubs.Set(QUOTAS, 'reserve', fake_reserve)
def stub_out_networking(stubs):
    """Stub netconf IP detection to always report localhost."""
    # NOTE(review): relies on nova.netconf having been imported elsewhere;
    # it is not imported in this module's header — verify.
    def get_my_ip():
        return '127.0.0.1'
    stubs.Set(nova.netconf, '_get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
    """Replace compute API snapshot with a fake mimicking glance's
    name-length rejection."""
    def _fake_snapshot(self, context, instance, name, extra_properties=None):
        # emulate glance rejecting image names which are too long
        if len(name) > 256:
            raise exc.Invalid
        return dict(id='123', status='ACTIVE', name=name,
                    properties=extra_properties)
    stubs.Set(compute_api.API, 'snapshot', _fake_snapshot)
class stub_out_compute_api_backup(object):
    """Stub compute API 'backup' and remember the extra properties of the
    most recent call for later inspection."""

    def __init__(self, stubs):
        self.stubs = stubs
        # extra_properties passed to the last backup() call (None if unset).
        self.extra_props_last_call = None
        stubs.Set(compute_api.API, 'backup', self.backup)
    def backup(self, context, instance, name, backup_type, rotation,
               extra_properties=None):
        self.extra_props_last_call = extra_properties
        # extra_properties may override backup_type/rotation via update().
        props = dict(backup_type=backup_type,
                     rotation=rotation)
        props.update(extra_properties or {})
        return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
    """Delegate network-info stubbing to the fake_network helper.
    NOTE(review): num_networks and func are accepted but ignored — verify.
    """
    fake_network.stub_out_nw_api_get_instance_nw_info(stubs,
                                                      spectacular=True)
def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
    """Stub the floating-IP lookup; default fake returns one address."""
    def _default_lookup(self, context, fixed_ip):
        return ['1.2.3.4']

    stubs.Set(network_api.API, 'get_floating_ips_by_fixed_address',
              func if func is not None else _default_lookup)
def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
    """Replace the network API class with a do-nothing fake."""
    private = private or '192.168.0.3'
    publics = publics or ['1.2.3.4']

    class Fake:
        def get_instance_nw_info(*args, **kwargs):
            pass

        def get_floating_ips_by_fixed_address(*args, **kwargs):
            return publics

    stubs.Set(network_api, 'API', cls or Fake)
    fake_network.stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True)
def _make_image_fixtures():
    """Build the list of fake glance image records used by stub_out_glance.

    Produces one public image, one snapshot per lifecycle status, and one
    nameless public image; ids are assigned sequentially from 123.
    """
    NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
    image_id = 123
    fixtures = []
    def add_fixture(**kwargs):
        fixtures.append(kwargs)
    # Public image
    add_fixture(id=image_id, name='public image', is_public=True,
                status='active', properties={'key1': 'value1'},
                min_ram="128", min_disk="10", size='25165824')
    image_id += 1
    # Snapshot for User 1
    uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
    server_ref = 'http://localhost/v2/servers/' + uuid
    snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'}
    # One snapshot fixture per possible status; only 'deleted' is flagged
    # as deleted.
    for status in ('queued', 'saving', 'active', 'killed',
                   'deleted', 'pending_delete'):
        deleted = False if status != 'deleted' else True
        add_fixture(id=image_id, name='%s snapshot' % status,
                    is_public=False, status=status,
                    properties=snapshot_properties, size='25165824',
                    deleted=deleted)
        image_id += 1
    # Image without a name
    add_fixture(id=image_id, is_public=True, status='active', properties={})
    return fixtures
def stub_out_glanceclient_create(stubs, sent_to_glance):
    """
    We return the metadata sent to glance by modifying the sent_to_glance dict
    in place.
    """
    orig_add_image = glanceclient.v1.images.ImageManager.create
    # NOTE(review): the first parameter is named 'context' but, once set as
    # ImageManager.create, it receives the manager instance — verify.
    def fake_create(context, metadata, data=None):
        sent_to_glance['metadata'] = metadata
        sent_to_glance['data'] = data
        return orig_add_image(metadata, data)
    stubs.Set(glanceclient.v1.images.ImageManager, 'create', fake_create)
def stub_out_glance(stubs):
    """Point the default image service at a stubbed glance client with
    canned fixtures."""
    def _fake_image_service():
        stub_client = glance_stubs.StubGlanceClient(_make_image_fixtures())
        wrapper = nova.image.glance.GlanceClientWrapper()
        wrapper.host = 'fake_host'
        wrapper.port = 9292
        wrapper.client = stub_client
        return nova.image.glance.GlanceImageService(client=wrapper)
    stubs.Set(nova.image.glance,
              'get_default_image_service',
              _fake_image_service)
class FakeToken(object):
    """Minimal auth-token object.

    Instances receive sequential integer ids and expose dict-style read
    access (token['x'] is an alias for token.x).
    """

    # Class-wide counter used to hand out unique ids.
    id_count = 0

    def __getitem__(self, key):
        # Allow token['foo'] as an alias for token.foo.
        return getattr(self, key)

    def __init__(self, **kwargs):
        FakeToken.id_count += 1
        self.id = FakeToken.id_count
        # items() instead of the Python-2-only iteritems(): identical
        # behavior on py2, and also works on py3.
        for k, v in kwargs.items():
            setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
    """RequestContext that defaults auth_token to a fixed fake value."""

    def __init__(self, *args, **kwargs):
        kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
        # NOTE(review): returning __init__'s (None) result is harmless but
        # unconventional.
        return super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
    """Request factory pre-populated with a fake context under the v2 API."""

    @classmethod
    def blank(cls, *args, **kwargs):
        # Anchor generated URLs under the v2 API root.
        kwargs['base_url'] = 'http://localhost/v2'
        use_admin_context = kwargs.pop('use_admin_context', False)
        out = os_wsgi.Request.blank(*args, **kwargs)
        out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
                                                         is_admin=use_admin_context)
        return out
class HTTPRequestV3(os_wsgi.Request):
    """Request factory pre-populated with a fake context under the v3 API."""

    @classmethod
    def blank(cls, *args, **kwargs):
        # Anchor generated URLs under the v3 API root.
        kwargs['base_url'] = 'http://localhost/v3'
        use_admin_context = kwargs.pop('use_admin_context', False)
        out = os_wsgi.Request.blank(*args, **kwargs)
        out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
                                                         is_admin=use_admin_context)
        return out
class TestRouter(wsgi.Router):
    """Router exposing a single controller as the 'tests' resource."""

    def __init__(self, controller, mapper=None):
        if not mapper:
            mapper = routes.Mapper()
        mapper.resource("test", "tests",
                        controller=os_wsgi.Resource(controller))
        super(TestRouter, self).__init__(mapper)
class FakeAuthDatabase(object):
    """In-memory auth token store.

    Each token is stored under two keys: its token_hash and 'id_<n>', so it
    can be looked up either way; both entries are removed on destroy.
    """

    # Shared class-level store: token_hash / 'id_<n>' -> FakeToken.
    data = {}
    @staticmethod
    def auth_token_get(context, token_hash):
        return FakeAuthDatabase.data.get(token_hash, None)
    @staticmethod
    def auth_token_create(context, token):
        fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
        FakeAuthDatabase.data[fake_token.token_hash] = fake_token
        FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
        return fake_token
    @staticmethod
    def auth_token_destroy(context, token_id):
        token = FakeAuthDatabase.data.get('id_%i' % token_id)
        # Remove both aliases; silently ignore unknown ids.
        if token and token.token_hash in FakeAuthDatabase.data:
            del FakeAuthDatabase.data[token.token_hash]
            del FakeAuthDatabase.data['id_%i' % token_id]
class FakeRateLimiter(object):
    """Rate-limiter middleware stand-in that never limits anything."""

    def __init__(self, application):
        self.application = application
    @webob.dec.wsgify
    def __call__(self, req):
        # Forward every request untouched.
        return self.application
def create_info_cache(nw_cache):
    """Wrap a network cache in the DB 'info_cache' structure.

    If nw_cache is None, a default single-NIC cache with one IPv4 and one
    IPv6 subnet is fabricated. Non-string caches are JSON-serialized.
    """
    if nw_cache is None:
        pub0 = ('192.168.1.100',)
        pub1 = ('2001:db8:0:1::1',)
        def _ip(ip):
            return {'address': ip, 'type': 'fixed'}
        nw_cache = [
            {'address': 'aa:aa:aa:aa:aa:aa',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'test1',
                         'subnets': [{'cidr': '192.168.1.0/24',
                                      'ips': [_ip(ip) for ip in pub0]},
                                     {'cidr': 'b33f::/64',
                                      'ips': [_ip(ip) for ip in pub1]}]}}]
    # basestring: this module targets Python 2.
    if not isinstance(nw_cache, basestring):
        nw_cache = jsonutils.dumps(nw_cache)
    return {"info_cache": {"network_info": nw_cache}}
def get_fake_uuid(token=0):
    """Return a per-token fake UUID, generated lazily and then cached."""
    try:
        return FAKE_UUIDS[token]
    except KeyError:
        FAKE_UUIDS[token] = str(uuid.uuid4())
        return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
    """Return a DB-API-shaped getter producing one stubbed instance.

    kwargs are forwarded to stub_instance(); the returned callable ignores
    the uuid it is asked for and always builds instance id 1.
    """
    def _return_server(context, uuid, columns_to_join=None):
        return stub_instance(1, **kwargs)
    return _return_server
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
    """Return a DB-API-shaped lister producing num_servers stubbed
    instances, honouring 'marker' and 'limit' pagination kwargs.

    NOTE(review): the inner function's **kwargs shadows the factory's
    **kwargs, so arguments passed to this factory (other than num_servers)
    never reach stub_instance() — verify this is intended.
    """
    def _return_servers(context, *args, **kwargs):
        servers_list = []
        marker = None
        limit = None
        found_marker = False
        if "marker" in kwargs:
            marker = kwargs["marker"]
        if "limit" in kwargs:
            limit = kwargs["limit"]
        if 'columns_to_join' in kwargs:
            kwargs.pop('columns_to_join')
        for i in xrange(num_servers):
            uuid = get_fake_uuid(i)
            server = stub_instance(id=i + 1, uuid=uuid,
                                   **kwargs)
            servers_list.append(server)
            # When the marker is hit, discard everything collected so far
            # so only servers after the marker are returned.
            if marker is not None and uuid == marker:
                found_marker = True
                servers_list = []
        if marker is not None and not found_marker:
            raise exc.MarkerNotFound(marker=marker)
        if limit is not None:
            servers_list = servers_list[:limit]
        return servers_list
    return _return_servers
def stub_instance(id, user_id=None, project_id=None, host=None,
                  node=None, vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0,
                  auto_disk_config=False, display_name=None,
                  include_fake_metadata=True, config_drive=None,
                  power_state=None, nw_cache=None, metadata=None,
                  security_groups=None, root_device_name=None,
                  limit=None, marker=None,
                  # NOTE(review): these two defaults are evaluated ONCE at
                  # import time, so every call shares the same timestamp —
                  # verify this is intended.
                  launched_at=datetime.datetime.utcnow(),
                  terminated_at=datetime.datetime.utcnow(),
                  availability_zone=''):
    """Build a fake DB-style instance dict with sensible defaults.

    Most keyword arguments map directly onto instance fields; metadata may
    be given as a plain dict and is converted to key/value records.
    """
    if user_id is None:
        user_id = 'fake_user'
    if project_id is None:
        project_id = 'fake_project'
    # Convert a plain metadata dict to DB-style key/value entries, or fall
    # back to a single fake 'seq' entry when requested.
    if metadata:
        metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
    elif include_fake_metadata:
        metadata = [models.InstanceMetadata(key='seq', value=str(id))]
    else:
        metadata = []
    inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
    sys_meta = flavors.save_flavor_info({}, inst_type)
    if host is not None:
        host = str(host)
    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''
    if security_groups is None:
        security_groups = [{"id": 1, "name": "test", "description": "Foo:",
                            "project_id": "project", "user_id": "user",
                            "created_at": None, "updated_at": None,
                            "deleted_at": None, "deleted": False}]
    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )
    info_cache = create_info_cache(nw_cache)
    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
        "deleted": None,
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "config_drive": config_drive,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "power_state": power_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "ephemeral_gb": 0,
        "hostname": display_name or server_name,
        "host": host,
        "node": node,
        "instance_type_id": 1,
        "instance_type": dict(inst_type),
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": timeutils.utcnow(),
        "launched_at": launched_at,
        "terminated_at": terminated_at,
        "availability_zone": availability_zone,
        "display_name": display_name or server_name,
        "display_description": "",
        "locked": False,
        "metadata": metadata,
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress,
        "auto_disk_config": auto_disk_config,
        "name": "instance-%s" % id,
        "shutdown_terminate": True,
        "disable_terminate": False,
        "security_groups": security_groups,
        "root_device_name": root_device_name,
        "system_metadata": utils.dict_to_metadata(sys_meta),
        "vm_mode": "",
        "default_swap_device": "",
        "default_ephemeral_device": "",
        "launched_on": "",
        "cell_name": "",
        "architecture": "",
        "os_type": ""}
    instance.update(info_cache)
    instance['info_cache']['instance_uuid'] = instance['uuid']
    return instance
def stub_volume(id, **kwargs):
    """Build a fake volume record; keyword overrides are applied last."""
    volume = dict(
        id=id,
        user_id='fakeuser',
        project_id='fakeproject',
        host='fakehost',
        size=1,
        availability_zone='fakeaz',
        instance_uuid='fakeuuid',
        mountpoint='/',
        status='fakestatus',
        attach_status='attached',
        name='vol name',
        display_name='displayname',
        display_description='displaydesc',
        created_at=datetime.datetime(1999, 1, 1, 1, 1, 1),
        snapshot_id=None,
        volume_type_id='fakevoltype',
        volume_metadata=[],
        volume_type={'name': 'vol_type_name'},
    )
    volume.update(kwargs)
    return volume
def stub_volume_create(self, context, size, name, description, snapshot,
                       **param):
    """Fake volume creation; echoes the requested attributes back."""
    vol = stub_volume('1')
    try:
        snapshot_id = snapshot['id']
    except (KeyError, TypeError):
        # No snapshot given, or it has no id.
        snapshot_id = None
    vol.update(size=size,
               display_name=name,
               display_description=description,
               snapshot_id=snapshot_id,
               availability_zone=param.get('availability_zone', 'fakeaz'))
    return vol
def stub_volume_create_from_image(self, context, size, name, description,
                                  snapshot, volume_type, metadata,
                                  availability_zone):
    """Fake image-backed volume creation; zone is always 'nova'."""
    vol = stub_volume('1')
    vol.update(status='creating',
               size=size,
               display_name=name,
               display_description=description,
               availability_zone='nova')
    return vol
def stub_volume_update(self, context, *args, **param):
    """No-op stand-in for the volume API's update call."""
    pass
def stub_volume_delete(self, context, *args, **param):
    """No-op stand-in for the volume API's delete call."""
    pass
def stub_volume_get(self, context, volume_id):
    """Return a default fake volume carrying the requested id."""
    return stub_volume(volume_id)
def stub_volume_notfound(self, context, volume_id):
    """Always raise VolumeNotFound for the requested id."""
    raise exc.VolumeNotFound(volume_id=volume_id)
def stub_volume_get_all(context, search_opts=None):
    """Return three fake volumes owned by different projects."""
    owners = ('fake', 'superfake', 'superduperfake')
    return [stub_volume(100 + i, project_id=owner)
            for i, owner in enumerate(owners)]
def stub_volume_get_all_by_project(self, context, search_opts=None):
    """Return a single-element list holding the stub volume with id '1'."""
    volume = stub_volume_get(self, context, '1')
    return [volume]
def stub_snapshot(id, **kwargs):
    """Return a fake snapshot dict for tests; keyword args override defaults."""
    snapshot = dict(
        id=id,
        volume_id=12,
        status='available',
        volume_size=100,
        created_at=timeutils.utcnow(),
        display_name='Default name',
        display_description='Default description',
        project_id='fake')
    snapshot.update(kwargs)
    return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
    """Fake snapshot create: id is always 100, other fields as requested."""
    overrides = {'volume_id': volume_id,
                 'display_name': name,
                 'display_description': description}
    return stub_snapshot(100, **overrides)
def stub_snapshot_delete(self, context, snapshot_id):
    """Fake snapshot delete; the sentinel id '-1' raises NotFound."""
    if snapshot_id == '-1':
        raise exc.NotFound
def stub_snapshot_get(self, context, snapshot_id):
    """Fake snapshot lookup; the sentinel id '-1' raises NotFound."""
    if snapshot_id != '-1':
        return stub_snapshot(snapshot_id)
    raise exc.NotFound
def stub_snapshot_get_all(self, context):
    """Return three stub snapshots owned by three different projects."""
    projects = ('fake', 'superfake', 'superduperfake')
    return [stub_snapshot(100 + i, project_id=proj)
            for i, proj in enumerate(projects)]
def stub_bdm_get_all_by_instance(context, instance_uuid):
    """Return two fake block-device mappings regardless of the instance."""
    return [{'volume_id': 'volume_id%d' % n} for n in (1, 2)]
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from json import loads
from qiita_core.qiita_settings import r_client
from qiita_pet.test.tornado_test_base import TestHandlerBase
class OAuth2BaseHandlerTests(TestHandlerBase):
    """Tests for the OAuth2 token checks applied by /qiita_db/ handlers.

    setUp seeds redis with a client-grant token, a password-grant (user)
    token and the user's daily rate-limit counter.  Every key is given a
    short TTL so a test run cannot leak state into redis.
    """

    def setUp(self):
        # Create client test authentication token
        self.client_token = 'SOMEAUTHTESTINGTOKENHERE2122'
        token_info = {
            'timestamp': '12/12/12 12:12:00',
            'client_id': 'test123123123',
            'grant_type': 'client'
        }
        r_client.hmset(self.client_token, token_info)
        r_client.expire(self.client_token, 5)
        # Create username test authentication token
        self.user_token = 'SOMEAUTHTESTINGTOKENHEREUSERNAME'
        token_info = {
            'timestamp': '12/12/12 12:12:00',
            'client_id': 'testuser',
            'grant_type': 'password',
            'user': 'test@foo.bar'
        }
        r_client.hmset(self.user_token, token_info)
        r_client.expire(self.user_token, 5)
        # Create test access limit token
        # (value 2 = requests remaining today; 5 second TTL)
        self.user_rate_key = 'testuser_test@foo.bar_daily_limit'
        r_client.setex(self.user_rate_key, 5, 2)
        super(OAuth2BaseHandlerTests, self).setUp()

    def test_authenticate_header_client(self):
        """A valid client token in the Authorization header is accepted."""
        obs = self.get('/qiita_db/artifacts/1/', headers={
            'Authorization': 'Bearer ' + self.client_token})
        self.assertEqual(obs.code, 200)

    def test_authenticate_header_username(self):
        """A valid user token is accepted and decrements the daily limit."""
        obs = self.get('/qiita_db/artifacts/1/', headers={
            'Authorization': 'Bearer ' + self.user_token})
        self.assertEqual(obs.code, 200)
        # Check rate limiting works
        self.assertEqual(int(r_client.get(self.user_rate_key)), 1)
        # Exhaust the daily limit; the next request must be rejected
        r_client.setex('testuser_test@foo.bar_daily_limit', 1, 0)
        obs = self.get('/qiita_db/artifacts/100/', headers={
            'Authorization': 'Bearer ' + self.user_token})
        exp = {'error': 'invalid_grant',
               'error_description': 'Oauth2 error: daily request limit reached'
               }
        self.assertEqual(loads(obs.body), exp)

    def test_authenticate_header_missing(self):
        """A request with no Authorization header is rejected with 400."""
        obs = self.get('/qiita_db/artifacts/100/')
        self.assertEqual(obs.code, 400)
        self.assertEqual(loads(obs.body), {
            'error': 'invalid_request',
            'error_description': 'Oauth2 error: invalid access token'})

    def test_authenticate_header_bad_token(self):
        """An unknown token is treated as timed out and rejected."""
        obs = self.get('/qiita_db/artifacts/100/', headers={
            'Authorization': 'Bearer BADTOKEN'})
        self.assertEqual(obs.code, 400)
        exp = {'error': 'invalid_grant',
               'error_description': 'Oauth2 error: token has timed out'}
        self.assertEqual(loads(obs.body), exp)

    def test_authenticate_header_bad_header_type(self):
        """A non-Bearer Authorization scheme is rejected."""
        obs = self.get('/qiita_db/artifacts/100/', headers={
            'Authorization': 'WRONG ' + self.client_token})
        self.assertEqual(obs.code, 400)
        exp = {'error': 'invalid_grant',
               'error_description': 'Oauth2 error: invalid access token'}
        self.assertEqual(loads(obs.body), exp)
class OAuth2HandlerTests(TestHandlerBase):
    """Tests for the /qiita_db/authenticate/ token-issuing endpoint.

    Covers both OAuth2 grant types supported: 'client' (client id +
    secret, via Basic auth header or POST body) and 'password' (user
    credentials plus a user-enabled client id).
    """

    def test_authenticate_client_header(self):
        """Client credentials in a Basic Authorization header yield a token."""
        # Authenticate using header
        obs = self.post(
            '/qiita_db/authenticate/', {'grant_type': 'client'}, {
                'Authorization': 'Basic MTluZGtPM29NS3NvQ2hqVlZXbHVGN1FreEhSZl'
                                 'loVEtTRmJBVnQ4SWhLN2daZ0RhTzQ6SjdGZlE3Q1FkT3'
                                 'h1S2hRQWYxZW9HZ0JBRTgxTnM4R3UzRUthV0ZtM0lPMk'
                                 'pLaEFtbUNXWnVhYmUwTzVNcDI4czE='})
        self.assertEqual(obs.code, 200)
        obs_body = loads(obs.body)
        exp = {'access_token': obs_body['access_token'],
               'token_type': 'Bearer',
               'expires_in': 3600}
        self.assertDictEqual(obs_body, exp)
        # Make sure token in system with proper ttl
        token = r_client.hgetall(obs_body['access_token'])
        exp = {
            b'timestamp': token[b'timestamp'],
            b'client_id': (b'19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAV'
                           b't8IhK7gZgDaO4'),
            b'grant_type': b'client'
        }
        self.assertDictEqual(token, exp)
        self.assertEqual(r_client.ttl(obs_body['access_token']), 3600)

    def test_authenticate_client_post(self):
        """Client credentials supplied in the POST body also yield a token."""
        # Authenticate using post only
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'client_secret': 'J7FfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2J'
                                 'KhAmmCWZuabe0O5Mp28s1'})
        self.assertEqual(obs.code, 200)
        obs_body = loads(obs.body)
        exp = {'access_token': obs_body['access_token'],
               'token_type': 'Bearer',
               'expires_in': 3600}
        self.assertDictEqual(obs_body, exp)
        # Make sure token in system with proper ttl
        token = r_client.hgetall(obs_body['access_token'])
        exp = {
            b'timestamp': token[b'timestamp'],
            b'client_id': (b'19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8'
                           b'IhK7gZgDaO4'),
            b'grant_type': b'client'
        }
        self.assertDictEqual(token, exp)
        self.assertEqual(r_client.ttl(obs_body['access_token']), 3600)

    def test_authenticate_client_bad_base64_hash(self):
        """A Basic header decoding to unknown credentials is rejected."""
        # Authenticate using bad header
        obs = self.post(
            '/qiita_db/authenticate/', {'grant_type': 'client'}, {
                'Authorization': 'Basic MTluZGtPM29NS3NvQ2hqVlZXbHVGN1FreEhSZl'
                                 'loVEtTRmJBVnQ4SBADN2daZ0RhTzQ6SjdGZlE3Q1FkT3'
                                 'h1S2hRQWYxZW9HZ0JBRTgxTnM4R3UzRUthV0ZtM0lPMk'
                                 'pLaEFtbUNXWnVhYmUwTzVNcDI4czE='})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_client_bad_header_base64_hash(self):
        """A non-Basic Authorization scheme is rejected as a bad token type."""
        obs = self.post(
            '/qiita_db/authenticate/', {'grant_type': 'client'}, {
                'Authorization': 'WRONG MTluZGtPM29NS3NvQ2hqVlZXbHVGN1FreEhSZl'
                                 'loVEtTRmJBVnQ4SWhLN2daZ0RhTzQ6SjdGZlE3Q1FkT3'
                                 'h1S2hRQWYxZW9HZ0JBRTgxTnM4R3UzRUthV0ZtM0lPMk'
                                 'pLaEFtbUNXWnVhYmUwTzVNcDI4czE='})
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_request',
               'error_description': 'Oauth2 error: invalid token type'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_client_bad_client_id(self):
        """An unknown client id in the POST body is rejected."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': 'BADdkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'client_secret': 'J7FfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2J'
                                 'KhAmmCWZuabe0O5Mp28s1'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_client_bad_client_secret(self):
        """A wrong client secret is rejected."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'client_secret': 'BADfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2J'
                                 'KhAmmCWZuabe0O5Mp28s1'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_client_missing_info(self):
        """Omitting the client secret is rejected as a malformed request."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_request',
               'error_description': 'Oauth2 error: missing client information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_password(self):
        """Valid user credentials with a password-grant client yield a token."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'test@foo.bar',
                'password': 'password'})
        self.assertEqual(obs.code, 200)
        obs_body = loads(obs.body)
        exp = {'access_token': obs_body['access_token'],
               'token_type': 'Bearer',
               'expires_in': 3600}
        self.assertDictEqual(obs_body, exp)
        # Make sure token in system with proper ttl
        token = r_client.hgetall(obs_body['access_token'])
        exp = {b'timestamp': token[b'timestamp'],
               b'user': b'test@foo.bar',
               b'client_id': token[b'client_id'],
               b'grant_type': b'password'}
        self.assertDictEqual(token, exp)
        self.assertEqual(r_client.ttl(obs_body['access_token']), 3600)

    def test_authenticate_password_non_user_client_id_header(self):
        """A client-only client id cannot be used for the password grant."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'username': 'test@foo.bar',
                'password': 'password'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_password_non_user_client_id(self):
        """An unknown client id is rejected for the password grant."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'WAAAAAAAAAARG',
                'username': 'test@foo.bar',
                'password': 'password'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_password_bad_user_id(self):
        """An unknown username is rejected."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'BROKEN@FAKE.COM',
                'password': 'password'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid user information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_password_bad_password(self):
        """A wrong password is rejected."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'test@foo.bar',
                'password': 'NOTAReALPASSworD'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid user information'}
        self.assertEqual(obs_body, exp)

    def test_authenticate_password_missing_info(self):
        """Omitting the password is rejected as a malformed request."""
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'test@foo.bar'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_request',
               'error_description': 'Oauth2 error: missing user information'}
        self.assertEqual(obs_body, exp)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    main()
|
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This script generates the "CREATE TABLE", "INSERT", and "LOAD" statements for loading
# test data and writes them to create-*-generated.sql and
# load-*-generated.sql. These files are then executed by hive or impala, depending
# on their contents. Additionally, for hbase, the file is of the form
# create-*hbase*-generated.create.
#
# The statements that are generated are based on an input test vector
# (read from a file) that describes the coverage desired. For example, currently
# we want to run benchmarks with different data sets, across different file types, and
# with different compression algorithms set. To improve data loading performance this
# script will generate an INSERT INTO statement to generate the data if the file does
# not already exist in HDFS. If the file does already exist in HDFS then we simply issue a
# LOAD statement which is much faster.
#
# The input test vectors are generated via the generate_test_vectors.py so
# ensure that script has been run (or the test vector files already exist) before
# running this script.
#
# Note: This statement generation is assuming the following data loading workflow:
# 1) Load all the data in the specified source table
# 2) Create tables for the new file formats and compression types
# 3) Run INSERT OVERWRITE TABLE SELECT * from the source table into the new tables
# or LOAD directly if the file already exists in HDFS.
import collections
import csv
import glob
import json
import math
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
from itertools import product
from optparse import OptionParser
from tests.util.test_file_parser import *
from tests.common.test_dimensions import *
# ---------------------------------------------------------------------------
# Command-line options.  NOTE: parse_args() runs at import time; an invalid
# invocation exits the process immediately below.
# ---------------------------------------------------------------------------
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
                  default="core", help="The exploration strategy for schema gen: 'core',"
                  " 'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
                  default="/test-warehouse",
                  help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workload", dest="workload",
                  help="The workload to generate schema for: tpch, hive-benchmark, ...")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
                  help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
                  default=False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                  default=False, help="If set, outputs additional logging.")
parser.add_option("-b", "--backend", dest="backend", default="localhost:21000",
                  help="Backend connection to use, default: localhost:21000")
parser.add_option("--table_names", dest="table_names", default=None,
                  help="Only load the specified tables - specified as a comma-seperated "
                  "list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
                  help="Override the test vectors and load using the specified table "
                  "formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
                  help="HDFS name node for Avro schema URLs, default localhost:20500")
(options, args) = parser.parse_args()

# A workload is mandatory; bail out with usage information if it is missing.
if options.workload is None:
  print "A workload name must be specified."
  parser.print_help()
  sys.exit(1)

# Directory layout, taken from the environment set up by the Impala dev shell.
WORKLOAD_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata', 'workloads')
DATASET_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata', 'datasets')
SQL_OUTPUT_DIR = os.environ['IMPALA_DATA_LOADING_SQL_DIR']
AVRO_SCHEMA_DIR = "avro_schemas"
DEFAULT_FS = os.environ['DEFAULT_FS']

# File formats Impala itself can INSERT into; everything else is loaded by Hive.
IMPALA_SUPPORTED_INSERT_FORMATS = ['parquet', 'hbase', 'text', 'kudu']

# Hive "SET ..." statement templates used when generating load/insert scripts.
COMPRESSION_TYPE = "SET mapred.output.compression.type=%s;"
COMPRESSION_ENABLED = "SET hive.exec.compress.output=%s;"
COMPRESSION_CODEC = "SET mapred.output.compression.codec=%s;"
AVRO_COMPRESSION_CODEC = "SET avro.output.codec=%s;"
SET_DYNAMIC_PARTITION_STATEMENT = "SET hive.exec.dynamic.partition=true;"
SET_PARTITION_MODE_NONSTRICT_STATEMENT = "SET hive.exec.dynamic.partition.mode=nonstrict;"
SET_HIVE_INPUT_FORMAT = "SET mapred.max.split.size=256000000;\n"\
                        "SET hive.input.format=org.apache.hadoop.hive.ql.io.%s;\n"
SET_HIVE_HBASE_BULK_LOAD = "SET hive.hbase.bulk = true"

# Indexes into a test-vector row (file format, dataset, codec, compression type).
FILE_FORMAT_IDX = 0
DATASET_IDX = 1
CODEC_IDX = 2
COMPRESSION_TYPE_IDX = 3

# Short codec name -> Hadoop compression codec class ('' means uncompressed).
COMPRESSION_MAP = {'def': 'org.apache.hadoop.io.compress.DefaultCodec',
                   'gzip': 'org.apache.hadoop.io.compress.GzipCodec',
                   'bzip': 'org.apache.hadoop.io.compress.BZip2Codec',
                   'snap': 'org.apache.hadoop.io.compress.SnappyCodec',
                   'lzo': 'com.hadoop.compression.lzo.LzopCodec',
                   'none': ''
                   }
# Avro uses its own codec names rather than Hadoop codec classes.
AVRO_COMPRESSION_MAP = {
  'def': 'deflate',
  'snap': 'snappy',
  'none': '',
  }
# Short format name -> Hive STORED AS / STORED BY clause content.
FILE_FORMAT_MAP = {
  'text': 'TEXTFILE',
  'seq': 'SEQUENCEFILE',
  'rc': 'RCFILE',
  'parquet': 'PARQUET',
  'text_lzo':
    "\nINPUTFORMAT 'com.hadoop.mapred.DeprecatedLzoTextInputFormat'" +
    "\nOUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'",
  'avro': 'AVRO',
  'hbase': "'org.apache.hadoop.hive.hbase.HBaseStorageHandler'",
  'kudu': "KUDU",
  }
HIVE_TO_AVRO_TYPE_MAP = {
  'STRING': 'string',
  'INT': 'int',
  'TINYINT': 'int',
  'SMALLINT': 'int',
  'BIGINT': 'long',
  'BOOLEAN': 'boolean',
  'FLOAT': 'float',
  'DOUBLE': 'double',
  # Avro has no timestamp type, so convert to string
  # TODO: this allows us to create our Avro test tables, but any tests that use
  # a timestamp column will fail. We probably want to convert back to timestamps
  # in our tests.
  'TIMESTAMP': 'string',
  }
# %-style template: expects 'table_name' and 'compression' keys.
PARQUET_ALTER_STATEMENT = "ALTER TABLE %(table_name)s SET\n\
SERDEPROPERTIES ('blocksize' = '1073741824', 'compression' = '%(compression)s');"
# Two-stage template: single braces are filled by build_hbase_create_stmt_in_hive,
# double braces survive for build_create_statement to fill in later.
HBASE_CREATE_STATEMENT = """
CREATE EXTERNAL TABLE IF NOT EXISTS {{db_name}}{{db_suffix}}.{{table_name}} (
{columns})
STORED BY {{file_format}}
WITH SERDEPROPERTIES (
  "hbase.columns.mapping" =
  "{hbase_column_mapping}")
{tbl_properties}{{hdfs_location}}"""
KNOWN_EXPLORATION_STRATEGIES = ['core', 'pairwise', 'exhaustive', 'lzo']
def build_create_statement(table_template, table_name, db_name, db_suffix,
                           file_format, compression, hdfs_location):
    """Return the SQL that creates the database (and, with --force_reload,
    drops and recreates the table) for one table/format combination."""
    statements = ['CREATE DATABASE IF NOT EXISTS %s%s;\n' % (db_name, db_suffix)]
    if options.force_reload:
        statements.append('DROP TABLE IF EXISTS %s%s.%s;\n'
                          % (db_name, db_suffix, table_name))
    if compression == 'lzo':
        file_format = '_'.join([file_format, compression])
    # hbase / kudu tables are external, and not read from hdfs. We don't need an
    # hdfs_location, so strip the LOCATION clause from the template.
    if file_format in ('hbase', 'kudu'):
        hdfs_location = ''
        table_template = table_template.replace("LOCATION '{hdfs_location}'", "")
    statements.append(table_template.format(
        db_name=db_name,
        db_suffix=db_suffix,
        table_name=table_name,
        file_format=FILE_FORMAT_MAP[file_format],
        hdfs_location=hdfs_location))
    return ''.join(statements)
def build_table_template(file_format, columns, partition_columns, row_format,
                         avro_schema_dir, table_name, table_properties):
    """Build the CREATE TABLE statement template for one file format.

    The returned string still contains {db_name}, {db_suffix}, {table_name}
    and {hdfs_location} placeholders, which build_create_statement() fills in
    later.  HBase tables are delegated to build_hbase_create_stmt_in_hive().
    """
    if file_format == 'hbase':
        return build_hbase_create_stmt_in_hive(columns, partition_columns, table_name)
    primary_keys_clause = ""
    partitioned_by = str()
    if partition_columns:
        partitioned_by = 'PARTITIONED BY (%s)' % ', '.join(partition_columns.split('\n'))
    row_format_stmt = str()
    if row_format and file_format != 'kudu':
        row_format_stmt = 'ROW FORMAT ' + row_format
    file_format_string = "STORED AS {file_format}"
    tblproperties_clause = "TBLPROPERTIES (\n{0}\n)"
    tblproperties = {}
    external = "EXTERNAL"
    if file_format == 'avro':
        # TODO Is this flag ever used?
        if options.hdfs_namenode is None:
            tblproperties["avro.schema.url"] = "%s/%s/%s/{table_name}.json" \
                % (DEFAULT_FS, options.hive_warehouse_dir, avro_schema_dir)
        else:
            tblproperties["avro.schema.url"] = "hdfs://%s/%s/%s/{table_name}.json" \
                % (options.hdfs_namenode, options.hive_warehouse_dir, avro_schema_dir)
    elif file_format == 'parquet':
        # BUGFIX: this was "file_format in 'parquet'", a substring test that
        # also matched any substring of "parquet" (e.g. "text" no, but "par",
        # "r", "q" yes).  An exact comparison is intended.
        row_format_stmt = str()
    elif file_format == 'kudu':
        # Use partitioned_by to set a trivial hash distribution
        assert not partitioned_by, "Kudu table shouldn't have partition cols defined"
        partitioned_by = "partition by hash partitions 3"
        # Fetch KUDU host and port from environment
        kudu_master = os.getenv("KUDU_MASTER_HOSTS", "127.0.0.1")
        kudu_master_port = os.getenv("KUDU_MASTER_PORT", "7051")
        row_format_stmt = str()
        tblproperties["kudu.master_addresses"] = \
            "{0}:{1}".format(kudu_master, kudu_master_port)
        # The first column of the schema is used as the primary key.
        primary_keys_clause = ", PRIMARY KEY (%s)" % columns.split("\n")[0].split(" ")[0]
        # Kudu's test tables are managed.
        external = ""
    # Read the properties specified in the TABLE_PROPERTIES section. When the
    # specified properties have the same key as a default property, the value
    # for the specified property is used.
    if table_properties:
        for table_property in table_properties.split("\n"):
            # Each line has the form <file_format>:<key>=<value>.  BUGFIX:
            # split with maxsplit=1 so values containing ':' or '=' (e.g.
            # "hdfs://host:port/path") are preserved intact.
            format_prop = table_property.split(":", 1)
            if format_prop[0] == file_format:
                key_val = format_prop[1].split("=", 1)
                tblproperties[key_val[0]] = key_val[1]
    all_tblproperties = []
    # .items() instead of py2-only .iteritems(): identical behavior on py2,
    # and keeps the script importable under py3.
    for key, value in tblproperties.items():
        all_tblproperties.append("'{0}' = '{1}'".format(key, value))
    # If there are no properties to set avoid the TBLPROPERTIES clause altogether.
    if not all_tblproperties:
        tblproperties_clause = ""
    else:
        tblproperties_clause = tblproperties_clause.format(",\n".join(all_tblproperties))
    # Note: columns are ignored but allowed if a custom serde is specified
    # (e.g. Avro)
    stmt = """
CREATE {external} TABLE IF NOT EXISTS {{db_name}}{{db_suffix}}.{{table_name}} (
{columns}
{primary_keys})
{partitioned_by}
{row_format}
{file_format_string}
LOCATION '{{hdfs_location}}'
{tblproperties}
""".format(
        external=external,
        row_format=row_format_stmt,
        columns=',\n'.join(columns.split('\n')),
        primary_keys=primary_keys_clause,
        partitioned_by=partitioned_by,
        tblproperties=tblproperties_clause,
        file_format_string=file_format_string
    ).strip()
    # Remove empty lines from the stmt string. There is an empty line for
    # each of the sections that didn't have anything (e.g. partitioned_by)
    stmt = os.linesep.join([s for s in stmt.splitlines() if s])
    stmt += ';'
    return stmt
def build_hbase_create_stmt_in_hive(columns, partition_columns, table_name):
    """Build the Hive-side CREATE statement for an HBase-backed table.

    HBase tables differ enough from the generic template to justify their own
    builder: STORED AS becomes STORED BY, a SERDEPROPERTIES column mapping is
    required, and partition columns are folded into the regular schema.
    """
    schema_columns = columns.split('\n')
    # PARTITIONED BY is not supported and does not make sense for HBase, so
    # partition columns are appended as ordinary columns.
    if partition_columns:
        schema_columns += partition_columns.split('\n')
    # stringid is a special case. It still points to functional_hbase.alltypesagg
    if 'stringid' not in table_name:
        target = '{db_name}{db_suffix}.{table_name}'
    else:
        target = '{db_name}{db_suffix}.alltypesagg'
    tbl_properties = 'TBLPROPERTIES("hbase.table.name" = "%s")' % target
    # Build the hbase column mapping; the first column implicitly becomes the
    # row key and gets the special [:key] representation.
    mapped = ['d:%s' % col.split(' ')[0] for col in schema_columns[1:]]
    hbase_column_mapping = ':key,' + ','.join(mapped)
    stmt = HBASE_CREATE_STATEMENT.format(
        columns=',\n'.join(schema_columns),
        hbase_column_mapping=hbase_column_mapping,
        tbl_properties=tbl_properties,
    ).strip()
    return stmt + ';'
def avro_schema(columns):
    """Convert a newline-separated Hive column spec into an Avro record
    schema, returned as a JSON string.  Every field is declared nullable."""
    fields = []
    for column_spec in columns.strip().split('\n'):
        # column_spec looks like "col_name col_type [COMMENT comment]";
        # the comment, when present, is ignored.
        tokens = column_spec.split()
        name = tokens[0]
        if "DECIMAL" in column_spec.upper():
            if tokens[1].upper() == "DECIMAL":
                # No scale and precision specified; use Hive's defaults.
                scale, precision = 0, 9
            else:
                match = re.search("DECIMAL\((?P<precision>.*),(?P<scale>.*)\)",
                                  column_spec.upper())
                assert match, "Could not parse decimal column spec: " + column_spec
                scale = int(match.group('scale'))
                precision = int(match.group('precision'))
            avro_type = {"type": "bytes", "logicalType": "decimal",
                         "precision": precision, "scale": scale}
        else:
            avro_type = HIVE_TO_AVRO_TYPE_MAP[tokens[1].upper()]
        fields.append({'name': name,
                       'type': [avro_type, "null"]})  # all columns nullable
    record = {
        "name": "a",  # the record name doesn't matter to the loader
        "type": "record",
        "fields": fields
    }
    return json.dumps(record)
def build_compression_codec_statement(codec, compression_type, file_format):
    """Return the SET statements configuring the output compression codec;
    an empty string means no compression."""
    is_avro = file_format == 'avro'
    codec_class = AVRO_COMPRESSION_MAP[codec] if is_avro else COMPRESSION_MAP[codec]
    if not codec_class:
        return ''
    if is_avro:
        return AVRO_COMPRESSION_CODEC % codec_class
    return '%s\n%s' % (COMPRESSION_TYPE % compression_type.upper(),
                       COMPRESSION_CODEC % codec_class)
def build_codec_enabled_statement(codec):
    """Return the SET statement that toggles Hive output compression."""
    return COMPRESSION_ENABLED % ('false' if codec == 'none' else 'true')
def build_insert_into_statement(insert, db_name, db_suffix, table_name, file_format,
                                hdfs_path, for_impala=False):
    """Fill in an INSERT template, prefixing it (for Hive only) with the SET
    statements needed for dynamic partitioning and input-format selection."""
    insert_statement = insert.format(db_name=db_name,
                                     db_suffix=db_suffix,
                                     table_name=table_name,
                                     hdfs_location=hdfs_path)
    # Kudu tables are managed and don't support OVERWRITE; downgrade the
    # statement to a regular INSERT INTO.
    if file_format == 'kudu':
        insert_statement = insert_statement.replace("OVERWRITE", "INTO")
    if for_impala:
        return insert_statement
    prefix = [SET_PARTITION_MODE_NONSTRICT_STATEMENT,
              SET_DYNAMIC_PARTITION_STATEMENT,
              "set hive.auto.convert.join=true;"]
    # For some reason (hive bug?) CombineHiveInputFormat is needed when
    # compressing in bzip or lzo on certain tables that have multiple files.
    if 'multi' in table_name and ('bzip' in db_suffix or 'lzo' in db_suffix):
        input_format = "CombineHiveInputFormat"
    else:
        input_format = "HiveInputFormat"
    prefix.append(SET_HIVE_INPUT_FORMAT % input_format)
    return '\n'.join(prefix) + insert_statement
def build_hbase_insert(db_name, db_suffix, table_name):
    """Return the Hive statements that bulk-load an HBase table from its
    text-format source table."""
    insert = ("INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name}"
              " SELECT * FROM {db_name}.{table_name};\n").format(
                  db_name=db_name, db_suffix=db_suffix, table_name=table_name)
    return SET_HIVE_HBASE_BULK_LOAD + ';\n' + insert
def build_insert(insert, db_name, db_suffix, file_format,
                 codec, compression_type, table_name, hdfs_path, create_hive=False):
    """Assemble the full INSERT script (codec settings + insert statement).

    HBase inserts don't need the hive options to be set, and don't require an
    HDFS file location, so they're handled separately.
    """
    if file_format == 'hbase' and not create_hive:
        return build_hbase_insert(db_name, db_suffix, table_name)
    parts = [build_codec_enabled_statement(codec),
             build_compression_codec_statement(codec, compression_type, file_format),
             build_insert_into_statement(insert, db_name, db_suffix,
                                         table_name, file_format, hdfs_path)]
    return '\n'.join(parts) + '\n'
def build_load_statement(load_template, db_name, db_suffix, table_name):
    """Fill in a LOAD template; hbase tables take no local data file path."""
    fmt_args = dict(table_name=table_name, db_name=db_name, db_suffix=db_suffix)
    if not table_name.startswith('hbase'):
        # REMOTE_LOAD (if set) overrides IMPALA_HOME as the data file root.
        fmt_args['impala_home'] = os.getenv("REMOTE_LOAD", os.getenv("IMPALA_HOME"))
    return load_template.format(**fmt_args)
def build_hbase_create_stmt(db_name, table_name, column_families):
    """Return the hbase-shell commands that (re)create one HBase table."""
    hbase_table_name = "%s_hbase.%s" % (db_name, table_name)
    families = ','.join("'%s'" % cf for cf in column_families.splitlines())
    return ["disable '%s'" % hbase_table_name,
            "drop '%s'" % hbase_table_name,
            "create '%s', %s" % (hbase_table_name, families)]
def build_db_suffix(file_format, codec, compression_type):
    """Map a (file format, codec, compression type) triple to a db suffix."""
    if codec == 'none':
        # Uncompressed text is the default format and gets no suffix at all.
        return '' if file_format == 'text' else '_%s' % file_format
    if compression_type == 'record':
        return '_%s_record_%s' % (file_format, codec)
    return '_%s_%s' % (file_format, codec)
def get_hdfs_subdirs_with_data(path):
    """Return the names of subdirectories of 'path' that contain data.

    Shells out to 'hadoop fs -du' and keeps only entries with a non-zero
    size, then strips each result down to its final path component.
    """
    tmp_file = tempfile.TemporaryFile("w+")
    cmd = "hadoop fs -du %s | grep -v '^0' | awk '{print $3}'" % path
    # BUGFIX: /dev/null was previously opened with the default mode ('r'),
    # so the child process could not actually write its stderr to it (and
    # the handle was never closed).  Open it for writing and close it.
    with open(os.devnull, 'w') as devnull:
        subprocess.call([cmd], shell=True, stderr=devnull, stdout=tmp_file)
    tmp_file.seek(0)
    # Each output line is an HDFS path; the subdirectory name is everything
    # after the last '/'.
    return [line[line.rfind('/') + 1:].strip() for line in tmp_file.readlines()]
class Statements(object):
    """Container for the SQL statements destined for one output file.

    CREATE, base-load and load statements are kept in separate lists so
    they can be emitted in the correct order.
    """

    def __init__(self):
        self.create = []
        self.load = []
        self.load_base = []

    def write_to_file(self, filename):
        """Write all statements to 'filename'; no-op when nothing is queued."""
        if self.__is_empty():
            return
        ordered = self.create + self.load_base + self.load
        with open(filename, 'w') as output_file:
            output_file.write('\n\n'.join(ordered))

    def __is_empty(self):
        return not (self.create or self.load or self.load_base)
def eval_section(section_str):
    """section_str should be the contents of a section (i.e. a string). If
    section_str starts with `, evaluates section_str as a shell command and
    returns the output. Otherwise returns section_str unchanged."""
    if not section_str.startswith('`'):
        return section_str
    cmd = section_str[1:]
    # Use bash explicitly instead of setting shell=True so we get more advanced
    # shell features (e.g. "for i in {1..n}")
    # BUGFIX: stderr was never captured (no stderr=PIPE), so the 'if stderr'
    # diagnostic below was dead code - stderr was always None.
    p = subprocess.Popen(['/bin/bash', '-c', cmd], stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if stderr:
        print(stderr)
    assert p.returncode == 0, "Command '%s' failed with: %s" % (cmd, stderr)
    return stdout.strip()
def generate_statements(output_name, test_vectors, sections,
                        schema_include_constraints, schema_exclude_constraints,
                        schema_only_constraints):
  """Generate CREATE/LOAD/INSERT SQL for every (table format, section) pair.

  For each table-format vector in `test_vectors` and each table section in
  `sections`, emits per-engine statement files: Impala create/load files per
  format, plus shared Hive and HBase files written once at the end.  The
  three constraint dicts filter which tables are generated per format.
  """
  # TODO: This method has become very unwieldy. It has to be re-factored sooner than
  # later.
  # Parquet statements to be executed separately by Impala
  hive_output = Statements()
  hbase_output = Statements()
  hbase_post_load = Statements()
  table_names = None
  if options.table_names:
    table_names = [name.lower() for name in options.table_names.split(',')]
  # Directories that already contain data; used below to skip reloads.
  existing_tables = get_hdfs_subdirs_with_data(options.hive_warehouse_dir)
  for row in test_vectors:
    # Impala statement files are written per table format.
    impala_output = Statements()
    impala_load = Statements()
    file_format, data_set, codec, compression_type =\
        [row.file_format, row.dataset, row.compression_codec, row.compression_type]
    table_format = '%s/%s/%s' % (file_format, codec, compression_type)
    for section in sections:
      table_name = section['BASE_TABLE_NAME'].strip()
      db_suffix = build_db_suffix(file_format, codec, compression_type)
      db_name = '{0}{1}'.format(data_set, options.scale_factor)
      db = '{0}{1}'.format(db_name, db_suffix)
      # --- Constraint filtering: skip tables excluded for this format. ---
      if table_names and (table_name.lower() not in table_names):
        print 'Skipping table: %s.%s, table is not in specified table list' % (db, table_name)
        continue
      if table_format in schema_only_constraints and \
         table_name.lower() not in schema_only_constraints[table_format]:
        print ('Skipping table: %s.%s, \'only\' constraint for format did not '
               'include this table.') % (db, table_name)
        continue
      if schema_include_constraints[table_name.lower()] and \
         table_format not in schema_include_constraints[table_name.lower()]:
        print 'Skipping \'%s.%s\' due to include constraint match.' % (db, table_name)
        continue
      if schema_exclude_constraints[table_name.lower()] and\
         table_format in schema_exclude_constraints[table_name.lower()]:
        print 'Skipping \'%s.%s\' due to exclude constraint match.' % (db, table_name)
        continue
      # --- Pull the per-table template sections. ---
      alter = section.get('ALTER')
      create = section['CREATE']
      create_hive = section['CREATE_HIVE']
      table_properties = section['TABLE_PROPERTIES']
      insert = eval_section(section['DEPENDENT_LOAD'])
      load = eval_section(section['LOAD'])
      if file_format == 'kudu':
        create_kudu = section["CREATE_KUDU"]
        # Kudu tables may override the generic dependent-load statement.
        if section['DEPENDENT_LOAD_KUDU']:
          insert = eval_section(section['DEPENDENT_LOAD_KUDU'])
      else:
        create_kudu = None
      # For some datasets we may want to use a different load strategy when running local
      # tests versus tests against large scale factors. The most common reason is to
      # reduce he number of partitions for the local test environment
      if not options.scale_factor and section['LOAD_LOCAL']:
        load = section['LOAD_LOCAL']
      columns = eval_section(section['COLUMNS']).strip()
      partition_columns = section['PARTITION_COLUMNS'].strip()
      row_format = section['ROW_FORMAT'].strip()
      # Force reloading of the table if the user specified the --force option or
      # if the table is partitioned and there was no ALTER section specified. This is to
      # ensure the partition metadata is always properly created. The ALTER section is
      # used to create partitions, so if that section exists there is no need to force
      # reload.
      # TODO: Rename the ALTER section to ALTER_TABLE_ADD_PARTITION
      force_reload = options.force_reload or (partition_columns and not alter)
      hdfs_location = '{0}.{1}{2}'.format(db_name, table_name, db_suffix)
      # hdfs file names for hive-benchmark and functional datasets are stored
      # directly under /test-warehouse
      # TODO: We should not need to specify the hdfs file path in the schema file.
      # This needs to be done programmatically.
      if data_set in ['hive-benchmark', 'functional']:
        hdfs_location = hdfs_location.split('.')[-1]
        # hive does not allow hyphenated table names.
        if data_set == 'hive-benchmark':
          db_name = '{0}{1}'.format('hivebenchmark', options.scale_factor)
      data_path = os.path.join(options.hive_warehouse_dir, hdfs_location)
      # Empty tables (tables with no "LOAD" sections) are assumed to be used for insert
      # testing. Since Impala currently only supports inserting into TEXT, PARQUET and
      # HBASE we need to create these tables with a supported insert format.
      create_file_format = file_format
      create_codec = codec
      if not (section['LOAD'] or section['LOAD_LOCAL'] or section['DEPENDENT_LOAD']):
        create_codec = 'none'
        create_file_format = file_format
        if file_format not in IMPALA_SUPPORTED_INSERT_FORMATS:
          create_file_format = 'text'
      # Route the CREATE statement to the engine that can execute it.
      output = impala_output
      if create_hive or file_format == 'hbase':
        output = hive_output
      elif codec == 'lzo':
        # Impala CREATE TABLE doesn't allow INPUTFORMAT.
        output = hive_output
      # TODO: Currently, Kudu does not support partitioned tables via Impala.
      # If a CREATE_KUDU section was provided, assume it handles the partition columns
      if file_format == 'kudu' and partition_columns != '' and not create_kudu:
        print "Ignore partitions on Kudu table: %s.%s" % (db_name, table_name)
        continue
      # If a CREATE section is provided, use that. Otherwise a COLUMNS section
      # must be provided (and optionally PARTITION_COLUMNS and ROW_FORMAT
      # sections), which is used to generate the create table statement.
      if create_hive:
        table_template = create_hive
      elif create_kudu:
        table_template = create_kudu
      elif create:
        table_template = create
        if file_format in ['avro', 'hbase', 'kudu']:
          # We don't know how to generalize CREATE sections to Avro and hbase.
          print ("CREATE section not supported with %s, "
                 "skipping: '%s'" % (file_format, table_name))
          continue
      elif columns:
        avro_schema_dir = "%s/%s" % (AVRO_SCHEMA_DIR, data_set)
        table_template = build_table_template(
            create_file_format, columns, partition_columns,
            row_format, avro_schema_dir, table_name, table_properties)
        # Write Avro schema to local file
        if file_format == 'avro':
          if not os.path.exists(avro_schema_dir):
            os.makedirs(avro_schema_dir)
          with open("%s/%s.json" % (avro_schema_dir, table_name),"w") as f:
            f.write(avro_schema(columns))
      else:
        table_template = None
      if table_template:
        output.create.append(build_create_statement(table_template, table_name, db_name,
            db_suffix, create_file_format, create_codec, data_path))
      # HBASE create table
      if file_format == 'hbase':
        # If the HBASE_COLUMN_FAMILIES section does not exist, default to 'd'
        column_families = section.get('HBASE_COLUMN_FAMILIES', 'd')
        hbase_output.create.extend(build_hbase_create_stmt(db_name, table_name,
            column_families))
        hbase_post_load.load.append("flush '%s_hbase.%s'\n" % (db_name, table_name))
      # The ALTER statement in hive does not accept fully qualified table names so
      # insert a use statement. The ALTER statement is skipped for HBASE as it's
      # used for adding partitions.
      # TODO: Consider splitting the ALTER subsection into specific components. At the
      # moment, it assumes we're only using ALTER for partitioning the table.
      if alter and file_format not in ("hbase", "kudu"):
        use_db = 'USE {db_name};\n'.format(db_name=db)
        if output == hive_output and codec == 'lzo':
          # Hive ALTER TABLE ADD PARTITION doesn't handle null partitions, so
          # we can't run the ALTER section in this case.
          if options.force_reload:
            # IMPALA-2278: Hive INSERT OVERWRITE won't clear out partition directories
            # that weren't already added to the table. So, for force reload, manually
            # delete the partition directories.
            output.create.append(("DFS -rm -R {data_path};").format(
                data_path=data_path));
          else:
            # If this is not a force reload use msck repair to add the partitions
            # into the table.
            output.create.append(use_db + 'msck repair table %s;' % (table_name))
        else:
          output.create.append(use_db + alter.format(table_name=table_name))
      # If the directory already exists in HDFS, assume that data files already exist
      # and skip loading the data. Otherwise, the data is generated using either an
      # INSERT INTO statement or a LOAD statement.
      if not force_reload and hdfs_location in existing_tables:
        print 'HDFS path:', data_path, 'contains data. Data loading can be skipped.'
      else:
        print 'HDFS path:', data_path, 'does not exists or is empty. Data will be loaded.'
        if not db_suffix:
          # Base (uncompressed text) table: load it via Hive from raw files.
          if load:
            hive_output.load_base.append(build_load_statement(load, db_name,
                db_suffix, table_name))
          else:
            print 'Empty base table load for %s. Skipping load generation' % table_name
        elif file_format in ['kudu', 'parquet']:
          # Formats Impala itself can write: use INSERT executed by Impala.
          if insert:
            impala_load.load.append(build_insert_into_statement(insert, db_name,
                db_suffix, table_name, file_format, data_path, for_impala=True))
          else:
            print 'Empty parquet/kudu load for table %s. Skipping insert generation' \
                % table_name
        else:
          # Everything else is populated from the base table via Hive.
          if insert:
            hive_output.load.append(build_insert(insert, db_name, db_suffix, file_format,
                codec, compression_type, table_name, data_path,
                create_hive=create_hive))
          else:
            print 'Empty insert for table %s. Skipping insert generation' % table_name
    impala_output.write_to_file("load-%s-impala-generated-%s-%s-%s.sql" %
        (output_name, file_format, codec, compression_type))
    impala_load.write_to_file("load-%s-impala-load-generated-%s-%s-%s.sql" %
        (output_name, file_format, codec, compression_type))
  # Hive/HBase statements are shared across all formats: write them once.
  hive_output.write_to_file('load-' + output_name + '-hive-generated.sql')
  hbase_output.create.append("exit")
  hbase_output.write_to_file('load-' + output_name + '-hbase-generated.create')
  hbase_post_load.load.append("exit")
  hbase_post_load.write_to_file('post-load-' + output_name + '-hbase-generated.sql')
def parse_schema_template_file(file_name):
  """Parse a schema template file into its constituent sections.

  Only the section names known to this generator are accepted; an unknown
  section name in the file is an error rather than being skipped.
  """
  known_sections = ['DATASET', 'BASE_TABLE_NAME', 'COLUMNS', 'PARTITION_COLUMNS',
                    'ROW_FORMAT', 'CREATE', 'CREATE_HIVE', 'CREATE_KUDU',
                    'DEPENDENT_LOAD', 'DEPENDENT_LOAD_KUDU', 'LOAD',
                    'LOAD_LOCAL', 'ALTER', 'HBASE_COLUMN_FAMILIES',
                    'TABLE_PROPERTIES']
  return parse_test_file(file_name, known_sections, skip_unknown_sections=False)
if __name__ == "__main__":
  # Build the list of table-format vectors: either from the exploration
  # strategy, or from an explicit --table_formats list.
  if options.table_formats is None:
    if options.exploration_strategy not in KNOWN_EXPLORATION_STRATEGIES:
      print 'Invalid exploration strategy:', options.exploration_strategy
      print 'Valid values:', ', '.join(KNOWN_EXPLORATION_STRATEGIES)
      sys.exit(1)
    test_vectors = [vector.value for vector in\
        load_table_info_dimension(options.workload, options.exploration_strategy)]
  else:
    table_formats = options.table_formats.split(',')
    dataset = get_dataset_from_workload(options.workload)
    test_vectors =\
        [TableFormatInfo.create_from_string(dataset, tf) for tf in table_formats]
  # All vectors share the same dataset; use the first one as the target.
  target_dataset = test_vectors[0].dataset
  print 'Target Dataset: ' + target_dataset
  dataset_load_dir = os.path.join(SQL_OUTPUT_DIR, target_dataset)
  # If the directory containing the sql files does not exist, create it. Else nuke all the
  # files corresponding to the current workload.
  try:
    os.makedirs(dataset_load_dir)
  except OSError:
    # Directory already exists, remove it.
    shutil.rmtree(dataset_load_dir)
    # Recreate the workload dir
    os.makedirs(dataset_load_dir)
  finally:
    # Make sure that the directory was created and is empty.
    assert os.path.isdir(dataset_load_dir)
    assert len(os.listdir(dataset_load_dir)) == 0
  # Make the dataset dir the current working directory
  os.chdir(dataset_load_dir)
  schema_template_file = os.path.join(DATASET_DIR, target_dataset,
                                      '%s_schema_template.sql' % target_dataset)
  if not os.path.isfile(schema_template_file):
    print 'Schema file not found: ' + schema_template_file
    sys.exit(1)
  # Per-table format constraints restricting which tables are generated.
  constraints_file = os.path.join(DATASET_DIR, target_dataset, 'schema_constraints.csv')
  include_constraints, exclude_constraints, only_constraints = \
      parse_table_constraints(constraints_file)
  sections = parse_schema_template_file(schema_template_file)
  generate_statements('%s-%s' % (options.workload, options.exploration_strategy),
      test_vectors, sections, include_constraints, exclude_constraints, only_constraints)
|
|
# Copyright 2011 VMware, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import os
import random
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_messaging import server as rpc_server
from oslo_service import loopingcall
from oslo_service import service as common_service
from oslo_utils import excutils
from oslo_utils import importutils
from neutron._i18n import _LE, _LI
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import config
from neutron.common import profiler
from neutron.common import rpc as n_rpc
from neutron.conf import service
from neutron import context
from neutron.db import api as session
from neutron import manager
from neutron import worker as neutron_worker
from neutron import wsgi
service.register_service_opts(service.service_opts)
LOG = logging.getLogger(__name__)
class WsgiService(object):
    """Base class for WSGI based services.

    For each api you define, you must also define these flags:
    :<api>_listen: The address on which to listen
    :<api>_listen_port: The port on which to listen
    """

    def __init__(self, app_name):
        # The WSGI app is created lazily by start(); until then it is None.
        self.app_name = app_name
        self.wsgi_app = None

    def start(self):
        """Load and serve the paste application named at construction."""
        self.wsgi_app = _run_wsgi(self.app_name)

    def wait(self):
        """Block until the WSGI server finishes."""
        self.wsgi_app.wait()
class NeutronApiService(WsgiService):
    """Class for neutron-api service."""

    def __init__(self, app_name):
        # Initialize OSprofiler before the WSGI machinery so request traces
        # are attributed to this server.
        profiler.setup('neutron-server', cfg.CONF.host)
        super(NeutronApiService, self).__init__(app_name)

    @classmethod
    def create(cls, app_name='neutron'):
        """Factory: configure logging, then build the service instance."""
        # Setup logging early
        config.setup_logging()
        service = cls(app_name)
        return service
def serve_wsgi(cls):
    """Create, start and return a WSGI service of type `cls`.

    Notifies interested callbacks (BEFORE_SPAWN) once the service is
    running; any startup failure is logged and re-raised.
    """
    try:
        svc = cls.create()
        svc.start()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Unrecoverable error: please check log '
                              'for details.'))
    registry.notify(resources.PROCESS, events.BEFORE_SPAWN, svc)
    return svc
class RpcWorker(neutron_worker.NeutronWorker):
    """Wraps a worker to be handled by ProcessLauncher"""

    # Name of the plugin method used to create RPC listeners; subclasses
    # may point this at a different entry point (see RpcReportsWorker).
    start_listeners_method = 'start_rpc_listeners'

    def __init__(self, plugins, worker_process_count=1):
        super(RpcWorker, self).__init__(
            worker_process_count=worker_process_count
        )
        self._plugins = plugins
        self._servers = []

    def start(self):
        """Start the worker, then collect RPC servers from every plugin."""
        super(RpcWorker, self).start()
        for plugin in self._plugins:
            if not hasattr(plugin, self.start_listeners_method):
                continue
            try:
                started_servers = getattr(plugin, self.start_listeners_method)()
            except NotImplementedError:
                # The plugin advertises the method but does not support it.
                continue
            self._servers.extend(started_servers)

    def wait(self):
        """Block until every server has finished; log and re-raise failures."""
        try:
            self._wait()
        except Exception:
            LOG.exception(_LE('done with wait'))
            raise

    def _wait(self):
        LOG.debug('calling RpcWorker wait()')
        for srv in self._servers:
            if isinstance(srv, rpc_server.MessageHandlingServer):
                LOG.debug('calling wait on %s', srv)
                srv.wait()
            else:
                LOG.debug('NOT calling wait on %s', srv)
        LOG.debug('returning from RpcWorker wait()')

    def stop(self):
        """Stop every known MessageHandlingServer."""
        LOG.debug('calling RpcWorker stop()')
        for srv in self._servers:
            if isinstance(srv, rpc_server.MessageHandlingServer):
                LOG.debug('calling stop on %s', srv)
                srv.stop()

    @staticmethod
    def reset():
        config.reset_service()
class RpcReportsWorker(RpcWorker):
    # Same lifecycle as RpcWorker, but its listeners come from the plugin's
    # state-reports entry point (spawned separately by _get_rpc_workers).
    start_listeners_method = 'start_rpc_state_reports_listener'
def _get_rpc_workers():
    """Build the list of RPC worker objects to launch.

    Returns a list containing one RpcWorker for all service plugins and,
    when configured and supported by the core plugin, an additional
    RpcReportsWorker dedicated to agent state reports.

    Raises NotImplementedError when the core plugin does not support RPC
    listeners at all.
    """
    plugin = manager.NeutronManager.get_plugin()
    service_plugins = (
        manager.NeutronManager.get_service_plugins().values())

    # At least one RPC worker process is always used.
    if cfg.CONF.rpc_workers < 1:
        cfg.CONF.set_override('rpc_workers', 1)

    # If 0 < rpc_workers then start_rpc_listeners would be called in a
    # subprocess and we cannot simply catch the NotImplementedError. It is
    # simpler to check this up front by testing whether the plugin supports
    # multiple RPC workers.
    if not plugin.rpc_workers_supported():
        LOG.debug("Active plugin doesn't implement start_rpc_listeners")
        if 0 < cfg.CONF.rpc_workers:
            LOG.error(_LE("'rpc_workers = %d' ignored because "
                          "start_rpc_listeners is not implemented."),
                      cfg.CONF.rpc_workers)
        raise NotImplementedError()

    # passing service plugins only, because core plugin is among them
    rpc_workers = [RpcWorker(service_plugins,
                             worker_process_count=cfg.CONF.rpc_workers)]

    if (cfg.CONF.rpc_state_report_workers > 0 and
            plugin.rpc_state_report_workers_supported()):
        rpc_workers.append(
            RpcReportsWorker(
                [plugin],
                worker_process_count=cfg.CONF.rpc_state_report_workers
            )
        )
    return rpc_workers
def _get_plugins_workers():
    """Collect the worker objects contributed by every loaded plugin."""
    # NOTE(twilson) get_service_plugins also returns the core plugin
    plugins = manager.NeutronManager.get_unique_service_plugins()

    # TODO(twilson) Instead of defaulting here, come up with a good way to
    # share a common get_workers default between NeutronPluginBaseV2 and
    # ServicePluginBase
    workers = []
    for plugin in plugins:
        if hasattr(plugin, 'get_workers'):
            workers.extend(plugin.get_workers())
    return workers
class AllServicesNeutronWorker(neutron_worker.NeutronWorker):
    """Runs a set of services together inside a single worker process."""

    def __init__(self, services, worker_process_count=1):
        super(AllServicesNeutronWorker, self).__init__(worker_process_count)
        self._services = services
        self._launcher = common_service.Launcher(cfg.CONF)

    def start(self):
        # Launch every wrapped service, then mark the worker as started.
        for svc in self._services:
            self._launcher.launch_service(svc)
        super(AllServicesNeutronWorker, self).start()

    def stop(self):
        self._launcher.stop()

    def wait(self):
        self._launcher.wait()

    def reset(self):
        self._launcher.restart()
def _start_workers(workers):
    """Launch `workers` and return the launcher that owns them.

    Workers with worker_process_count > 0 are forked via a ProcessLauncher;
    the remaining (in-process) workers are bundled into one extra process.
    When there are no process workers at all, everything runs through a
    plain ServiceLauncher instead.
    """
    process_workers = [
        plugin_worker for plugin_worker in workers
        if plugin_worker.worker_process_count > 0
    ]

    try:
        if process_workers:
            worker_launcher = common_service.ProcessLauncher(
                cfg.CONF, wait_interval=1.0
            )

            # add extra process worker and spawn there all workers with
            # worker_process_count == 0
            thread_workers = [
                plugin_worker for plugin_worker in workers
                if plugin_worker.worker_process_count < 1
            ]
            if thread_workers:
                process_workers.append(
                    AllServicesNeutronWorker(thread_workers)
                )

            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            session.context_manager.dispose_pool()

            for worker in process_workers:
                worker_launcher.launch_service(worker,
                                               worker.worker_process_count)
        else:
            worker_launcher = common_service.ServiceLauncher(cfg.CONF)
            for worker in workers:
                worker_launcher.launch_service(worker)
        return worker_launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Unrecoverable error: please check log for '
                              'details.'))
def start_all_workers():
    """Launch both the RPC workers and the plugin-provided workers."""
    return _start_workers(_get_rpc_workers() + _get_plugins_workers())
def start_rpc_workers():
    """Launch only the RPC workers and return their launcher."""
    workers = _get_rpc_workers()
    LOG.debug('using launcher for rpc, workers=%s', cfg.CONF.rpc_workers)
    return _start_workers(workers)
def start_plugins_workers():
    """Launch only the plugin-provided workers and return their launcher."""
    return _start_workers(_get_plugins_workers())
def _get_api_workers():
    """Number of API worker processes; falls back to the host CPU count."""
    workers = cfg.CONF.api_workers
    return workers if workers is not None else processutils.get_worker_count()
def _run_wsgi(app_name):
    """Load the paste app `app_name` and serve it.

    Returns the running server, or None (after logging an error) when no
    application by that name is configured.
    """
    app = config.load_paste_app(app_name)
    if app:
        return run_wsgi_app(app)
    LOG.error(_LE('No known API applications configured.'))
    return
def run_wsgi_app(app):
    """Start a Neutron WSGI server for `app` and return it."""
    server = wsgi.Server("Neutron")
    host = cfg.CONF.bind_host
    port = cfg.CONF.bind_port
    server.start(app, port, host, workers=_get_api_workers())
    LOG.info(_LI("Neutron service started, listening on %(host)s:%(port)s"),
             {'host': host, 'port': port})
    return server
class Service(n_rpc.Service):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager.
    """

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 *args, **kwargs):
        # `manager` is a dotted class path; it is imported and instantiated
        # here so the service owns its manager's lifecycle.
        self.binary = binary
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=host, *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        # NOTE(review): saved but not used in this class — presumably kept
        # for manager re-creation elsewhere; confirm before removing.
        self.saved_args, self.saved_kwargs = args, kwargs
        # Looping-call timers started by start(); stopped/awaited later.
        self.timers = []
        profiler.setup(binary, host)
        super(Service, self).__init__(host, topic, manager=self.manager)

    def start(self):
        """Start RPC, then schedule state-report and periodic-task timers."""
        self.manager.init_host()
        super(Service, self).start()
        if self.report_interval:
            # Periodically push this service's state (see report_state).
            pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                # Randomize the first run to avoid many services firing
                # their periodic tasks at the same instant.
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
        self.manager.after_start()

    def __getattr__(self, key):
        # Delegate unknown attribute access to the manager (if any).
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None):
        """Instantiates class and passes back application object.

        :param host: defaults to cfg.CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'neutron-' part
        :param manager: defaults to cfg.CONF.<topic>_manager
        :param report_interval: defaults to cfg.CONF.report_interval
        :param periodic_interval: defaults to cfg.CONF.periodic_interval
        :param periodic_fuzzy_delay: defaults to cfg.CONF.periodic_fuzzy_delay
        """
        if not host:
            host = cfg.CONF.host
        if not binary:
            # Derive the binary name from the outermost stack frame's file.
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary.rpartition('neutron-')[2]
            topic = topic.replace("-", "_")
        if not manager:
            manager = cfg.CONF.get('%s_manager' % topic, None)
        if report_interval is None:
            report_interval = cfg.CONF.report_interval
        if periodic_interval is None:
            periodic_interval = cfg.CONF.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = cfg.CONF.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay)

        return service_obj

    def kill(self):
        """Destroy the service object."""
        self.stop()

    def stop(self):
        super(Service, self).stop()
        # Best-effort: one timer failing to stop must not block the others.
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                LOG.exception(_LE("Exception occurs when timer stops"))
        self.timers = []

    def wait(self):
        super(Service, self).wait()
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                LOG.exception(_LE("Exception occurs when waiting for timer"))

    def reset(self):
        config.reset_service()

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)

    def report_state(self):
        """Update the state of this service."""
        # Todo(gongysh) report state to neutron server
        pass
|
|
# -*- coding: utf-8 -*-
import warnings
import numpy as np
from pytz import utc
from pandas._libs import tslib
from pandas._libs.tslib import Timestamp, NaT, iNaT
from pandas._libs.tslibs import (
conversion, fields, timezones,
resolution as libresolution)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_datetime64tz_dtype,
is_datetime64_dtype,
_ensure_int64)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.tseries.frequencies import to_offset, DateOffset
from .datetimelike import DatetimeLikeArrayMixin
def _field_accessor(name, field, docstring=None):
    """Build a property that extracts datetime field `field` from self.asi8.

    The generated property localizes tz-aware values first, then dispatches
    to the appropriate `fields.get_*` helper depending on the field kind
    (boolean, object, or numeric).
    """
    def f(self):
        values = self.asi8
        if self.tz is not None:
            if self.tz is not utc:
                # Non-UTC tz: compute fields on wall-clock (local) values.
                values = self._local_timestamps()

        if field in self._bool_ops:
            if field.endswith(('start', 'end')):
                # is_*_start / is_*_end depend on the frequency's anchor
                # month (e.g. a fiscal year starting in a non-January month).
                freq = self.freq
                month_kw = 12
                if freq:
                    kwds = freq.kwds
                    month_kw = kwds.get('startingMonth', kwds.get('month', 12))

                result = fields.get_start_end_field(values, field,
                                                    self.freqstr, month_kw)
            else:
                result = fields.get_date_field(values, field)

            # these return a boolean by-definition
            return result

        if field in self._object_ops:
            result = fields.get_date_name_field(values, field)
            result = self._maybe_mask_results(result)

        else:
            result = fields.get_date_field(values, field)
            result = self._maybe_mask_results(result, convert='float64')

        return result

    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
class DatetimeArrayMixin(DatetimeLikeArrayMixin):
    """
    Assumes that subclass __new__/__init__ defines:
        tz
        _freq
        _data
    """
    # Fields returning booleans; handled specially by _field_accessor.
    _bool_ops = ['is_month_start', 'is_month_end',
                 'is_quarter_start', 'is_quarter_end', 'is_year_start',
                 'is_year_end', 'is_leap_year']
    # Fields returning objects rather than numeric values.
    _object_ops = ['weekday_name', 'freq', 'tz']

    # -----------------------------------------------------------------
    # Constructors

    _attributes = ["freq", "tz"]
    @classmethod
    def _simple_new(cls, values, freq=None, tz=None, **kwargs):
        """
        we require the we have a dtype compat for the values
        if we are passed a non-dtype compat, then coerce using the constructor
        """
        if getattr(values, 'dtype', None) is None:
            # empty, but with dtype compat
            if values is None:
                values = np.empty(0, dtype=_NS_DTYPE)
                return cls(values, freq=freq, tz=tz, **kwargs)
            values = np.array(values, copy=False)

        if not is_datetime64_dtype(values):
            # Reinterpret raw integers as nanosecond timestamps.
            values = _ensure_int64(values).view(_NS_DTYPE)

        # Bypass __new__/__init__ validation and set internals directly.
        result = object.__new__(cls)
        result._data = values
        result._freq = freq
        tz = timezones.maybe_get_tz(tz)
        result._tz = timezones.tz_standardize(tz)
        return result
    def __new__(cls, values, freq=None, tz=None):
        if (freq is not None and not isinstance(freq, DateOffset) and
                freq != 'infer'):
            # Accept frequency strings (e.g. 'H') and convert to offsets.
            freq = to_offset(freq)

        result = cls._simple_new(values, freq=freq, tz=tz)
        if freq == 'infer':
            # Infer the frequency from the data after construction.
            inferred = result.inferred_freq
            if inferred:
                result.freq = to_offset(inferred)

        # NB: Among other things not yet ported from the DatetimeIndex
        # constructor, this does not call _deepcopy_if_needed
        return result
# -----------------------------------------------------------------
# Descriptive Properties
    @property
    def _box_func(self):
        # Scalar boxing: wrap a raw i8 value as a Timestamp carrying this
        # array's freq and tz.
        return lambda x: Timestamp(x, freq=self.freq, tz=self.tz)
    @cache_readonly
    def dtype(self):
        # tz-naive data is plain datetime64[ns]; tz-aware data uses the
        # parametrized DatetimeTZDtype.
        if self.tz is None:
            return _NS_DTYPE
        return DatetimeTZDtype('ns', self.tz)
    @property
    def tzinfo(self):
        """
        Alias for tz attribute (mirrors the stdlib datetime.tzinfo name).
        """
        return self.tz
    @property  # NB: override with cache_readonly in immutable subclasses
    def _timezone(self):
        """ Comparable timezone both for pytz / dateutil"""
        # get_timezone normalizes pytz and dateutil timezone objects into a
        # common representation so they can be compared with ==.
        return timezones.get_timezone(self.tzinfo)
    # Deprecated alias for `freq`; both the getter and setter emit a
    # FutureWarning pointing callers at `freq`.
    @property
    def offset(self):
        """get/set the frequency of the instance"""
        msg = ('{cls}.offset has been deprecated and will be removed '
               'in a future version; use {cls}.freq instead.'
               .format(cls=type(self).__name__))
        warnings.warn(msg, FutureWarning, stacklevel=2)
        return self.freq

    @offset.setter
    def offset(self, value):
        """get/set the frequency of the instance"""
        msg = ('{cls}.offset has been deprecated and will be removed '
               'in a future version; use {cls}.freq instead.'
               .format(cls=type(self).__name__))
        warnings.warn(msg, FutureWarning, stacklevel=2)
        self.freq = value
    @property  # NB: override with cache_readonly in immutable subclasses
    def is_normalized(self):
        """
        Returns True if all of the dates are at midnight ("no time")
        """
        # Delegates to the C-implemented check in tslibs.conversion.
        return conversion.is_date_array_normalized(self.asi8, self.tz)
    @property  # NB: override with cache_readonly in immutable subclasses
    def _resolution(self):
        # Delegates to tslibs' resolution computation over the i8 values.
        return libresolution.resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-like Methods
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
-------
tstamp : Timestamp
"""
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = int(length / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = tslib.ints_to_pydatetime(data[start_i:end_i],
tz=self.tz, freq=self.freq,
box="timestamp")
for v in converted:
yield v
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other):
zzone = self._timezone
# vzone sholdn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
vzone = timezones.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
return zzone == vzone
    def _assert_tzawareness_compat(self, other):
        # adapted from _Timestamp._assert_tzawareness_compat
        other_tz = getattr(other, 'tzinfo', None)
        if is_datetime64tz_dtype(other):
            # Get tzinfo from Series dtype
            other_tz = other.dtype.tz
        if other is NaT:
            # pd.NaT quacks both aware and naive
            pass
        elif self.tz is None:
            if other_tz is not None:
                raise TypeError('Cannot compare tz-naive and tz-aware '
                                'datetime-like objects.')
        elif other_tz is None:
            raise TypeError('Cannot compare tz-naive and tz-aware '
                            'datetime-like objects')
        # NOTE(review): the two messages above differ only by a trailing
        # period; consider unifying them.
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike_dti(self, other):
"""subtraction of two DatetimeIndexes"""
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = self_i8 - other_i8
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('timedelta64[ns]')
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
    def _local_timestamps(self):
        """
        Convert to an i8 (unix-like nanosecond timestamp) representation
        while keeping the local timezone and not using UTC.
        This is used to calculate time-of-day information as if the timestamps
        were timezone-naive.
        """
        values = self.asi8
        # Sort, convert, then invert the permutation so the converted values
        # line up with the original order — presumably because
        # conversion.tz_convert expects sorted input; TODO confirm.
        indexer = values.argsort()
        result = conversion.tz_convert(values.take(indexer), utc, self.tz)

        n = len(indexer)
        # `reverse` maps sorted positions back to original positions.
        reverse = np.empty(n, dtype=np.int_)
        reverse.put(indexer, np.arange(n))
        return result.take(reverse)
def tz_convert(self, tz):
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
normalized : same type as self
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._shallow_copy(tz=tz)
def tz_localize(self, tz, ambiguous='raise', errors='raise'):
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
Time zone localization helps to switch from time zone aware to time
zone unaware objects.
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : str {'infer', 'NaT', 'raise'} or bool array,
default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
errors : {'raise', 'coerce'}, default 'raise'
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified time zone (e.g. due to a transition from
or to DST time)
- 'coerce' will return NaT if the timestamp can not be converted
to the specified time zone
.. versionadded:: 0.19.0
Returns
-------
result : same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq='D')
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
"""
if self.tz is not None:
if tz is None:
new_dates = conversion.tz_convert(self.asi8, 'UTC', self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = conversion.tz_localize_to_utc(self.asi8, tz,
ambiguous=ambiguous,
errors=errors)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self):
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
Parameters
----------
locale : string, default None (English locale)
locale determining the language in which to return the month name
Returns
-------
month_names : Index
Index of month names
.. versionadded:: 0.23.0
"""
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'month_name',
locale=locale)
result = self._maybe_mask_results(result)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
Parameters
----------
locale : string, default None (English locale)
locale determining the language in which to return the day name
Returns
-------
month_names : Index
Index of day names
.. versionadded:: 0.23.0
"""
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'day_name',
locale=locale)
result = self._maybe_mask_results(result)
return result
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and self.tz is not utc:
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="time")
@property
def date(self):
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and self.tz is not utc:
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="date")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
"The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
microsecond = _field_accessor('microsecond', 'us',
"The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns',
"The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy',
"The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
weekday_name = _field_accessor(
'weekday_name',
'weekday_name',
"The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0")
dayofyear = _field_accessor('dayofyear', 'doy',
"The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor(
'days_in_month',
'dim',
"The number of days in the month")
daysinmonth = days_in_month
is_month_start = _field_accessor(
'is_month_start',
'is_month_start',
"Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor(
'is_month_end',
'is_month_end',
"""
Indicator for whether the date is the last day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values. For
DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Indicator for whether the date is the first day
of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> dates
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> dates.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_end
array([False, True, False], dtype=bool)
""")
is_quarter_start = _field_accessor(
'is_quarter_start',
'is_quarter_start',
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property for indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""")
is_quarter_end = _field_accessor(
'is_quarter_end',
'is_quarter_end',
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""")
is_year_start = _field_accessor(
'is_year_start',
'is_year_start',
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""")
is_year_end = _field_accessor(
'is_year_end',
'is_year_end',
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""")
is_leap_year = _field_accessor(
'is_leap_year',
'is_leap_year',
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False], dtype=bool)
>>> dates = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""")
def to_julian_date(self):
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
http://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (day +
np.fix((153 * month - 457) / 5) +
365 * year +
np.floor(year / 4) -
np.floor(year / 100) +
np.floor(year / 400) +
1721118.5 +
(self.hour +
self.minute / 60.0 +
self.second / 3600.0 +
self.microsecond / 3600.0 / 1e+6 +
self.nanosecond / 3600.0 / 1e+9
) / 24.0)
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import re
from flask import render_template
from flask_pluginengine import render_plugin_template
from indico.core import signals
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.vc.forms import VCPluginSettingsFormBase
from indico.modules.vc.models.vc_rooms import VCRoomLinkType
from indico.util.decorators import classproperty
from indico.util.string import remove_accents
from indico.web.flask.templating import get_overridable_template_name
from indico.web.forms.base import FormDefaults
# Strips the mandatory 'vc_' prefix from plugin names (see service_name).
PREFIX_RE = re.compile('^vc_')
class VCPluginMixin(object):
    """Base mixin for videoconference (VC) plugins.

    Subclasses must be named ``vc_<service>`` and provide the forms,
    templates and room-management hooks used by the videoconference module.
    """
    settings_form = VCPluginSettingsFormBase
    default_settings = {'notification_emails': []}
    acl_settings = {'acl', 'managers'}
    #: the :class:`IndicoForm` to use for the videoconference room form
    vc_room_form = None
    #: the :class:`IndicoForm` to use for the videoconference room attach form
    vc_room_attach_form = None
    #: the readable name of the VC plugin
    friendly_name = None

    def init(self):
        """Initialize the plugin and enforce the ``vc_`` naming convention."""
        super(VCPluginMixin, self).init()
        if not self.name.startswith('vc_'):
            raise Exception('Videoconference plugins must be named vc_*')
        # Keep plugin ACL settings consistent when two user accounts merge.
        self.connect(signals.users.merged, self._merge_users)

    @property
    def service_name(self):
        """Return the plugin name without the ``vc_`` prefix."""
        return PREFIX_RE.sub('', self.name)

    @property
    def logo_url(self):
        """URL of the service logo; subclasses must override."""
        raise NotImplementedError('VC plugin must have a logo URL')

    @property
    def icon_url(self):
        """URL of the service icon; subclasses must override."""
        raise NotImplementedError('VC plugin must have an icon URL')

    @classproperty
    @staticmethod
    def category():
        # NOTE(review): local import -- presumably avoids a circular import
        # between this module and the plugin core; confirm before moving.
        from indico.core.plugins import PluginCategory
        return PluginCategory.videoconference

    def get_vc_room_form_defaults(self, event):
        """Return default field values for the room creation form."""
        return {
            # Suggest a safe room name derived from the event title.
            'name': re.sub(r'[^\w_-]', '_', remove_accents(event.title, reencode=False)),
            'show': True,
            'linking': 'event',
            'contribution': '',
            'block': ''
        }

    def get_vc_room_attach_form_defaults(self, event):
        """Return default field values for the room attach form."""
        return {
            'room': None,
            'contribution': None,
            'block': None,
            'linking': 'event',
            'show': True
        }

    def get_notification_cc_list(self, action, vc_room, event):
        """Return extra CC addresses for notification emails (none by default)."""
        return set()

    def get_notification_bcc_list(self, action, vc_room, event):
        """Return BCC addresses for notification emails, from plugin settings."""
        return set(self.settings.get('notification_emails', set()))

    def render_form(self, **kwargs):
        """Render the videoconference room form.

        :param kwargs: arguments passed to the template
        """
        return render_template('vc/manage_event_create_room.html', **kwargs)

    def render_info_box(self, vc_room, event_vc_room, event, **kwargs):
        """Render the information shown in the expandable box of a VC room row.

        :param vc_room: the VC room object
        :param event_vc_room: the association of an event and a VC room
        :param event: the event with the current VC room attached to it
        :param kwargs: arguments passed to the template
        """
        return render_plugin_template('{}:info_box.html'.format(self.name), plugin=self, event_vc_room=event_vc_room,
                                      event=event, vc_room=vc_room, settings=self.settings, **kwargs)

    def render_manage_event_info_box(self, vc_room, event_vc_room, event, **kwargs):
        """Render the expandable info box of a VC room in the management area.

        :param vc_room: the VC room object
        :param event_vc_room: the association of an event and a VC room
        :param event: the event with the current VC room attached to it
        :param kwargs: arguments passed to the template
        """
        return render_plugin_template('{}:manage_event_info_box.html'.format(self.name), plugin=self,
                                      event_vc_room=event_vc_room, event=event, vc_room=vc_room,
                                      settings=self.settings, **kwargs)

    def render_buttons(self, vc_room, event_vc_room, **kwargs):
        """Render plugin-specific buttons (e.g. Join URL) in the management area.

        :param vc_room: the VC room object
        :param event_vc_room: the association of an event and a VC room
        :param kwargs: arguments passed to the template
        """
        name = get_overridable_template_name('management_buttons.html', self, core_prefix='vc/')
        return render_template(name, plugin=self, vc_room=vc_room, event_vc_room=event_vc_room, **kwargs)

    def render_event_buttons(self, vc_room, event_vc_room, **kwargs):
        """Render plugin-specific buttons (e.g. Join URL) on the event page.

        :param vc_room: the VC room object
        :param event_vc_room: the association of an event and a VC room
        :param kwargs: arguments passed to the template
        """
        name = get_overridable_template_name('event_buttons.html', self, core_prefix='vc/')
        return render_template(name, plugin=self, vc_room=vc_room, event_vc_room=event_vc_room,
                               event=event_vc_room.event, **kwargs)

    def create_form(self, event, existing_vc_room=None, existing_event_vc_room=None):
        """Create the videoconference room form.

        :param event: the event the videoconference room is for
        :param existing_vc_room: a VC room from which to retrieve data
                                 for the form
        :param existing_event_vc_room: the event/VC-room association used
                                       to pre-fill linking information
        :return: an instance of an :class:`IndicoForm` subclass
        """
        if existing_vc_room and existing_event_vc_room:
            kwargs = {
                'name': existing_vc_room.name,
                'linking': existing_event_vc_room.link_type.name,
                'show': existing_event_vc_room.show
            }
            # Pre-select the linked contribution/session block, if any.
            if existing_event_vc_room.link_type == VCRoomLinkType.contribution:
                kwargs['contribution'] = existing_event_vc_room.contribution_id
            elif existing_event_vc_room.link_type == VCRoomLinkType.block:
                kwargs['block'] = existing_event_vc_room.session_block_id
            # Association-specific data overrides the room-wide data.
            data = existing_vc_room.data
            data.update(existing_event_vc_room.data)
            defaults = FormDefaults(data, **kwargs)
        else:
            defaults = FormDefaults(self.get_vc_room_form_defaults(event))
        with self.plugin_context():
            return self.vc_room_form(prefix='vc-', obj=defaults, event=event, vc_room=existing_vc_room)

    def update_data_association(self, event, vc_room, event_vc_room, data):
        """Update the event/VC-room association from submitted form data.

        Consumes (pops) the linking-related keys from ``data``.
        """
        contribution_id = data.pop('contribution')
        block_id = data.pop('block')
        link_type = VCRoomLinkType[data.pop('linking')]
        # Link the room to the event itself, a contribution or a session
        # block, depending on the selected link type.
        if link_type == VCRoomLinkType.event:
            event_vc_room.link_object = event
        elif link_type == VCRoomLinkType.contribution:
            event_vc_room.link_object = Contribution.get_one(contribution_id)
        elif link_type == VCRoomLinkType.block:
            event_vc_room.link_object = SessionBlock.get_one(block_id)
        event_vc_room.vc_room = vc_room
        event_vc_room.show = data.pop('show')
        if event_vc_room.data is None:
            event_vc_room.data = {}

    def update_data_vc_room(self, vc_room, data):
        """Update the VC room itself from submitted form data (pops keys)."""
        if 'name' in data:
            vc_room.name = data.pop('name')
        if vc_room.data is None:
            vc_room.data = {}

    def create_room(self, vc_room, event):
        """Create the room on the remote service; subclasses must override."""
        raise NotImplementedError('Plugin must implement create_room()')

    def can_manage_vc_rooms(self, user, event):
        """Checks if a user can manage vc rooms on an event"""
        if self.can_manage_vc(user):
            return True
        if not self.settings.acls.get('acl'):  # everyone has access
            return True
        return self.settings.acls.contains_user('acl', user)

    def can_manage_vc_room(self, user, room):
        """Checks if a user can manage a vc room"""
        return (user.is_admin or
                self.can_manage_vc(user) or
                any(evt_assoc.event.can_manage(user) for evt_assoc in room.events))

    def can_manage_vc(self, user):
        """Checks if a user has management rights on this VC system"""
        if user.is_admin:
            return True
        return self.settings.acls.contains_user('managers', user)

    def _merge_users(self, target, source, **kwargs):
        """Signal handler: move ACL entries from ``source`` to ``target``."""
        self.settings.acls.merge_users(target, source)
|
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db import transaction as db_transaction
from django.http import Http404
from django.http.response import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy, reverse
from django.views import View
from django.views.generic import CreateView, ListView, DeleteView
from hordak.forms import SimpleTransactionForm, TransactionForm, LegFormSet
from hordak.forms.transactions import CurrencyTradeForm
from hordak.models import StatementLine, Leg, Transaction
class TransactionCreateView(LoginRequiredMixin, CreateView):
    """ View for creation of simple transactions.

    This functionality is provided by :class:`hordak.models.Account.transfer_to()`,
    see the method's documentation for additional details.

    Examples:
        .. code-block:: python
            urlpatterns = [
                ...
                url(r'^transactions/create/$', TransactionCreateView.as_view(), name='transactions_create'),
            ]
    """
    # Simple two-account transfer form; on success, return to the account list.
    form_class = SimpleTransactionForm
    success_url = reverse_lazy("hordak:accounts_list")
    template_name = "hordak/transactions/transaction_create.html"
class CurrencyTradeView(LoginRequiredMixin, CreateView):
    """View for recording a trade between two currencies."""

    form_class = CurrencyTradeForm
    success_url = reverse_lazy("hordak:accounts_list")
    template_name = "hordak/transactions/currency_trade.html"

    def get_form_kwargs(self):
        # Drop the ``instance`` kwarg that CreateView supplies; the currency
        # trade form does not accept it.
        form_kwargs = super(CurrencyTradeView, self).get_form_kwargs()
        del form_kwargs["instance"]
        return form_kwargs
class TransactionsListView(LoginRequiredMixin, ListView):
    """View for listing transactions
    """
    model = Transaction
    template_name = "hordak/transactions/transaction_list.html"
    context_object_name = "transactions"
    # Newest transactions first; pk breaks ties for same-day entries.
    ordering = ["-date", "-pk"]
class LegsListView(LoginRequiredMixin, ListView):
    """View for listing legs
    """
    model = Leg
    template_name = "hordak/transactions/leg_list.html"
    context_object_name = "legs"
    # Newest transactions first; transaction pk and leg pk break ties.
    ordering = ["-transaction__date", "-transaction__pk", "-pk"]
class TransactionDeleteView(LoginRequiredMixin, DeleteView):
    """View for deleting a transaction, looked up by its UUID."""
    model = Transaction
    # Transactions are addressed by uuid in URLs rather than by pk.
    slug_url_kwarg = "uuid"
    slug_field = "uuid"
    template_name = "hordak/transactions/transaction_delete.html"
    context_object_name = "transaction"
    success_url = reverse_lazy("hordak:accounts_list")
class TransactionsReconcileView(LoginRequiredMixin, ListView):
    """ Handle rendering and processing in the reconciliation view

    Note that this only extends ListView, and we implement the form
    processing functionality manually.

    Examples:
        .. code-block:: python
            urlpatterns = [
                ...
                url(r'^transactions/reconcile/$', TransactionsReconcileView.as_view(), name='transactions_reconcile'),
            ]
    """
    template_name = "hordak/transactions/reconcile.html"
    model = StatementLine
    paginate_by = 50
    context_object_name = "statement_lines"
    ordering = ["-date", "-pk"]
    success_url = reverse_lazy("hordak:accounts_list")

    def get_uuid(self):
        # The selected statement line may arrive via POST (form submission)
        # or GET (link from the listing).
        return self.request.POST.get("reconcile") or self.request.GET.get("reconcile")

    def get_object(self, queryset=None):
        """Return the unreconciled StatementLine selected by the request, or None."""
        # Get any Statement Line instance that was specified
        if queryset is None:
            queryset = self.get_queryset()
        uuid = self.get_uuid()
        if not uuid:
            return None
        # Only lines without a transaction are candidates for reconciliation.
        queryset = queryset.filter(uuid=uuid, transaction=None)
        try:
            obj = queryset.get()
        except queryset.model.DoesNotExist:
            raise Http404("No unreconciled statement line found for {}".format(uuid))
        return obj

    def get(self, request, *args, **kwargs):
        # Remember which line (if any) is being reconciled before rendering.
        self.object = self.get_object()
        return super(TransactionsReconcileView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Validate and process the reconciliation forms."""
        self.object = self.get_object()
        # Make sure the ListView gets setup
        self.get(self.request, *self.args, **self.kwargs)
        # Check form validity
        transaction_form = self.get_transaction_form()
        leg_formset = self.get_leg_formset()
        if transaction_form.is_valid() and leg_formset.is_valid():
            return self.form_valid(transaction_form, leg_formset)
        else:
            return self.form_invalid(transaction_form, leg_formset)

    def form_valid(self, transaction_form, leg_formset):
        """Create the transaction and its legs, then link the statement line.

        All writes happen inside one database transaction: either the
        transaction, its legs and the statement-line link are all saved,
        or none are.
        """
        with db_transaction.atomic():
            # Save the transaction
            transaction_form.instance.date = self.object.date
            transaction = transaction_form.save()
            # Create the inbound transaction leg
            bank_account = self.object.statement_import.bank_account
            # The leg amount mirrors the statement amount with opposite sign.
            amount = self.object.amount * -1
            Leg.objects.create(
                transaction=transaction,
                account=bank_account,
                amount=amount,
                # Note that bank accounts can only have one currency
                amount_currency=bank_account.currencies[0],
            )
            # We need to create a new leg formset in order to pass in the
            # transaction we just created (required as the new legs must
            # be associated with the new transaction)
            leg_formset = self.get_leg_formset(instance=transaction)
            # NOTE(review): the formset data was already validated in post();
            # this assert is a sanity check only and is stripped under -O.
            assert leg_formset.is_valid()
            leg_formset.save()
            # Now point the statement line to the new transaction
            self.object.transaction = transaction
            self.object.save()
        # Clear the selection so the re-rendered page shows no open form.
        self.object = None
        return self.render_to_response(self.get_context_data())

    def form_invalid(self, transaction_form, leg_formset):
        # Re-render the listing with the bound (errored) forms in context.
        return self.render_to_response(
            self.get_context_data(transaction_form=transaction_form, leg_formset=leg_formset)
        )

    def get_context_data(self, **kwargs):
        # If a Statement Line has been selected for reconciliation,
        # then add the forms to the context
        if self.object:
            kwargs.update(
                transaction_form=self.get_transaction_form(),
                leg_formset=self.get_leg_formset(),
                reconcile_line=self.object,
            )
        return super(TransactionsReconcileView, self).get_context_data(**kwargs)

    def get_transaction_form(self):
        # Bound form on POST, unbound otherwise; the description defaults to
        # the statement line's description.
        return TransactionForm(
            data=self.request.POST or None, initial=dict(description=self.object.description)
        )

    def get_leg_formset(self, **kwargs):
        # The formset needs the statement line to derive leg defaults.
        return LegFormSet(data=self.request.POST or None, statement_line=self.object, **kwargs)
class UnreconcileView(LoginRequiredMixin, View):
    """Un-reconcile a statement line by deleting its associated transaction."""

    def post(self, request, uuid):
        # NOTE(review): deleting the transaction presumably clears the
        # statement line's link via the FK's on_delete behaviour -- confirm.
        line = get_object_or_404(StatementLine, uuid=uuid)
        line.transaction.delete()
        return HttpResponseRedirect(reverse("hordak:transactions_reconcile"))
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import timedelta
import numpy as np
from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp,
compat, concat, option_context)
from pandas.compat import u
from pandas import _np_version_under1p14
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.tests.frame.common import TestData
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
import pandas as pd
class TestDataFrameDataTypes(TestData):
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df['a'] = df['a'].astype(np.bool_)
df['b'] = df['b'].astype(np.int32)
df['c'] = df['c'].astype(np.float64)
result = pd.concat([df, df])
assert result['a'].dtype == np.bool_
assert result['b'].dtype == np.int32
assert result['c'].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result['a'].dtype == np.object_
assert result['b'].dtype == np.float64
assert result['c'].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
assert_series_equal(norows_df.dtypes, pd.Series(
np.object, index=list("abc")))
assert_series_equal(norows_df.ftypes, pd.Series(
'object:dense', index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
assert_series_equal(norows_int_df.dtypes, pd.Series(
np.dtype('int32'), index=list("abc")))
assert_series_equal(norows_int_df.ftypes, pd.Series(
'int32:dense', index=list("abc")))
odict = compat.OrderedDict
df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]),
index=[1, 2, 3])
ex_dtypes = pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)]))
ex_ftypes = pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')]))
assert_series_equal(df.dtypes, ex_dtypes)
assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
assert_series_equal(df[:0].dtypes, ex_dtypes)
assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame({'A': date_range('20130101', periods=3),
'B': date_range('20130101', periods=3,
tz='US/Eastern'),
'C': date_range('20130101', periods=3, tz='CET')})
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series([np.dtype('datetime64[ns]'),
DatetimeTZDtype('datetime64[ns, US/Eastern]'),
DatetimeTZDtype('datetime64[ns, CET]')],
['A', 'B', 'C'])
assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = compat.OrderedDict
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_),
('b', np.float_),
('c', np.float_)])))
assert_series_equal(df.iloc[:, 2:].dtypes,
pd.Series(odict([('c', np.float_)])))
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_),
('b', np.float_),
('c', np.float_)])))
def test_select_dtypes_include_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=[np.number])
ei = df[['b', 'c', 'd', 'k']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=['timedelta'])
ei = df[['b', 'c', 'd']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, 'category'],
exclude=['timedelta'])
ei = df[['b', 'c', 'd', 'f']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetime'])
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetime64'])
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetimetz'])
ei = df[['h', 'i']]
assert_frame_equal(ri, ei)
pytest.raises(NotImplementedError,
lambda: df.select_dtypes(include=['period']))
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True]})
re = df.select_dtypes(exclude=[np.number])
ee = df[['a', 'e']]
assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
exclude = np.datetime64,
include = np.bool_, 'integer'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'c', 'e']]
assert_frame_equal(r, e)
exclude = 'datetime',
include = 'bool', 'int64', 'int32'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'e']]
assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
    """A scalar (non-list) ``include`` value selects matching columns."""
    # One column per dtype family so every selector below has a target.
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.Categorical(list('abc')),
                    'g': pd.date_range('20130101', periods=3),
                    'h': pd.date_range('20130101', periods=3,
                                       tz='US/Eastern'),
                    'i': pd.date_range('20130101', periods=3,
                                       tz='CET'),
                    'j': pd.period_range('2013-01', periods=3,
                                         freq='M'),
                    'k': pd.timedelta_range('1 day', periods=3)})
    # np.number matches ints, uints, floats and timedeltas here.
    ri = df.select_dtypes(include=np.number)
    ei = df[['b', 'c', 'd', 'k']]
    assert_frame_equal(ri, ei)
    # 'datetime' matches only the tz-naive datetime column.
    ri = df.select_dtypes(include='datetime')
    ei = df[['g']]
    assert_frame_equal(ri, ei)
    ri = df.select_dtypes(include='datetime64')
    ei = df[['g']]
    assert_frame_equal(ri, ei)
    ri = df.select_dtypes(include='category')
    ei = df[['f']]
    assert_frame_equal(ri, ei)
    # Selecting period columns is not implemented at this pandas version.
    pytest.raises(NotImplementedError,
                  lambda: df.select_dtypes(include='period'))
def test_select_dtypes_exclude_using_scalars(self):
    """A scalar ``exclude`` value drops columns of that dtype family."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.Categorical(list('abc')),
                    'g': pd.date_range('20130101', periods=3),
                    'h': pd.date_range('20130101', periods=3,
                                       tz='US/Eastern'),
                    'i': pd.date_range('20130101', periods=3,
                                       tz='CET'),
                    'j': pd.period_range('2013-01', periods=3,
                                         freq='M'),
                    'k': pd.timedelta_range('1 day', periods=3)})
    # Excluding np.number also drops the timedelta column 'k'.
    ri = df.select_dtypes(exclude=np.number)
    ei = df[['a', 'e', 'f', 'g', 'h', 'i', 'j']]
    assert_frame_equal(ri, ei)
    ri = df.select_dtypes(exclude='category')
    ei = df[['a', 'b', 'c', 'd', 'e', 'g', 'h', 'i', 'j', 'k']]
    assert_frame_equal(ri, ei)
    # Period exclusion is likewise unimplemented at this pandas version.
    pytest.raises(NotImplementedError,
                  lambda: df.select_dtypes(exclude='period'))
def test_select_dtypes_include_exclude_using_scalars(self):
    """Scalar ``include`` and ``exclude`` may be combined in one call."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.Categorical(list('abc')),
                    'g': pd.date_range('20130101', periods=3),
                    'h': pd.date_range('20130101', periods=3,
                                       tz='US/Eastern'),
                    'i': pd.date_range('20130101', periods=3,
                                       tz='CET'),
                    'j': pd.period_range('2013-01', periods=3,
                                         freq='M'),
                    'k': pd.timedelta_range('1 day', periods=3)})
    # Numbers minus floats leaves the int, uint and timedelta columns.
    ri = df.select_dtypes(include=np.number, exclude='floating')
    ei = df[['b', 'c', 'k']]
    assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
    """Scalar and list selectors can be mixed between include/exclude."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.Categorical(list('abc')),
                    'g': pd.date_range('20130101', periods=3),
                    'h': pd.date_range('20130101', periods=3,
                                       tz='US/Eastern'),
                    'i': pd.date_range('20130101', periods=3,
                                       tz='CET'),
                    'j': pd.period_range('2013-01', periods=3,
                                         freq='M'),
                    'k': pd.timedelta_range('1 day', periods=3)})
    # Scalar include, list exclude.
    ri = df.select_dtypes(include=np.number,
                          exclude=['floating', 'timedelta'])
    ei = df[['b', 'c']]
    assert_frame_equal(ri, ei)
    # List include, scalar exclude.
    ri = df.select_dtypes(include=[np.number, 'category'],
                          exclude='floating')
    ei = df[['b', 'c', 'f', 'k']]
    assert_frame_equal(ri, ei)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
    """Typecode strings like 'i8'/'O' work even without a numpy attribute."""
    frame = DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.date_range('now', periods=3).values})
    # diff() of a datetime column yields a timedelta column.
    frame['g'] = frame.f.diff()
    # Sanity check: there is no np.u8 attribute, yet 'u8'-style codes work.
    assert not hasattr(np, 'u8')
    result = frame.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])
    assert_frame_equal(result, frame[['a', 'b']])
    result = frame.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])
    assert_frame_equal(result, frame[['a', 'b', 'g']])
def test_select_dtypes_empty(self):
    """Calling select_dtypes with neither include nor exclude raises."""
    frame = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
    expected_msg = ('at least one of include or exclude '
                    'must be nonempty')
    with tm.assert_raises_regex(ValueError, expected_msg):
        frame.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
    """Unit-qualified datetime64 dtypes are rejected as too specific."""
    frame = DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.date_range('now', periods=3).values})
    # Both include and exclude must reject sub-'ns' datetime64 units.
    for kwargs in ({'include': ['datetime64[D]']},
                   {'exclude': ['datetime64[as]']}):
        with tm.assert_raises_regex(ValueError, '.+ is too specific'):
            frame.select_dtypes(**kwargs)
def test_select_dtypes_datetime_with_tz(self):
    """Tz-aware columns must not match a plain 'datetime64[ns]' selector."""
    base = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
                          B=Timestamp('20130603', tz='CET')),
                     index=range(5))
    combined = pd.concat([base.A.to_frame(), base.B.to_frame()], axis=1)
    selected = combined.select_dtypes(include=['datetime64[ns]'])
    # Neither tz-aware column qualifies, so the selection is empty.
    empty = combined.reindex(columns=[])
    assert_frame_equal(selected, empty)
def test_select_dtypes_str_raises(self):
    """Every spelling of a string dtype must be rejected with TypeError."""
    df = DataFrame({'a': list('abc'),
                    'g': list(u('abc')),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.date_range('now', periods=3).values})
    # Cover class objects, dtype strings and typecodes for str/unicode.
    string_dtypes = set((str, 'str', np.string_, 'S1',
                         'unicode', np.unicode_, 'U1'))
    try:
        # Python 2 only: 'unicode' is a distinct builtin there.
        string_dtypes.add(unicode)
    except NameError:
        pass
    for dt in string_dtypes:
        # Both include and exclude must refuse string dtypes.
        with tm.assert_raises_regex(TypeError,
                                    'string dtypes are not allowed'):
            df.select_dtypes(include=[dt])
        with tm.assert_raises_regex(TypeError,
                                    'string dtypes are not allowed'):
            df.select_dtypes(exclude=[dt])
def test_select_dtypes_bad_arg_raises(self):
    """An unparseable dtype string raises TypeError."""
    frame = DataFrame({'a': list('abc'),
                       'g': list(u('abc')),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.date_range('now', periods=3).values})
    bad_selector = ['blargy, blarg, blarg']
    with tm.assert_raises_regex(TypeError, 'data type.'
                                '*not understood'):
        frame.select_dtypes(bad_selector)
def test_select_dtypes_typecodes(self):
    """Selecting with every float typecode keeps an all-float frame (GH 11990)."""
    frame = mkdf(30, 3, data_gen_f=lambda x, y: np.random.random())
    float_typecodes = list(np.typecodes['AllFloat'])
    assert_frame_equal(frame.select_dtypes(float_typecodes), frame)
def test_dtypes_gh8722(self):
    """DataFrame.dtypes reflects each column's dtype, incl. bool columns."""
    self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
    result = self.mixed_frame.dtypes
    expected = Series(dict((k, v.dtype)
                           for k, v in compat.iteritems(self.mixed_frame)),
                      index=result.index)
    assert_series_equal(result, expected)
    # compat, GH 8722: .dtypes must still work with use_inf_as_na enabled.
    with option_context('use_inf_as_na', True):
        df = DataFrame([[1]])
        result = df.dtypes
        assert_series_equal(result, Series({0: np.dtype('int64')}))
def test_ftypes(self):
    """DataFrame.ftypes reports 'dtype:density' per column."""
    frame = self.mixed_float
    # All columns in the mixed_float fixture are dense float columns.
    expected = Series(dict(A='float32:dense',
                           B='float32:dense',
                           C='float16:dense',
                           D='float64:dense')).sort_values()
    result = frame.ftypes.sort_values()
    assert_series_equal(result, expected)
def test_astype(self):
    """astype casts all columns, matching a raw ndarray-level cast."""
    casted = self.frame.astype(int)
    expected = DataFrame(self.frame.values.astype(int),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)
    casted = self.frame.astype(np.int32)
    expected = DataFrame(self.frame.values.astype(np.int32),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)
    # Adding a string column makes the frame object-typed; the
    # ndarray-level cast then goes through object as well.
    self.frame['foo'] = '5'
    casted = self.frame.astype(int)
    expected = DataFrame(self.frame.values.astype(int),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)
    # mixed casting
    def _check_cast(df, v):
        # Every column of df must end up with dtype name v.
        assert (list(set(s.dtype.name for
                         _, s in compat.iteritems(df)))[0] == v)
    mn = self.all_mixed._get_numeric_data().copy()
    mn['little_float'] = np.array(12345., dtype='float16')
    mn['big_float'] = np.array(123456789101112., dtype='float64')
    casted = mn.astype('float64')
    _check_cast(casted, 'float64')
    casted = mn.astype('int64')
    _check_cast(casted, 'int64')
    casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float32')
    _check_cast(casted, 'float32')
    casted = mn.reindex(columns=['little_float']).astype('float16')
    _check_cast(casted, 'float16')
    casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float16')
    _check_cast(casted, 'float16')
    casted = mn.astype('float32')
    _check_cast(casted, 'float32')
    casted = mn.astype('int32')
    _check_cast(casted, 'int32')
    # to object
    casted = mn.astype('O')
    _check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
    """errors='ignore' leaves non-convertible string columns untouched."""
    # Same check for the builtin int and an explicit numpy integer type.
    for target_dtype in (int, np.int32):
        frame = self.frame.copy()
        expected = self.frame.astype(target_dtype)
        frame['string'] = 'foo'
        casted = frame.astype(target_dtype, errors='ignore')
        expected['string'] = 'foo'
        assert_frame_equal(casted, expected)
def test_astype_with_view(self):
    """Smoke test for astype with copy=False (view semantics).

    The casts are exercised for crashes only; the intermediate results
    are intentionally discarded (see the TODO below).
    """
    tf = self.mixed_float.reindex(columns=['A', 'B', 'C'])
    casted = tf.astype(np.int64)
    casted = tf.astype(np.float32)
    # this is the only real reason to do it this way
    tf = np.round(self.frame).astype(np.int32)
    casted = tf.astype(np.float32, copy=False)
    # TODO(wesm): verification?
    tf = self.frame.astype(np.float64)
    casted = tf.astype(np.int64, copy=False)  # noqa
def test_astype_cast_nan_inf_int(self):
    """Casting NaN or inf to an integer dtype must raise (GH 14265)."""
    msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer'
    # Every combination of integer target dtype and non-finite value.
    for int_dtype in (np.int32, np.int64):
        for bad_value in (np.nan, np.inf):
            frame = DataFrame([bad_value])
            with tm.assert_raises_regex(ValueError, msg):
                frame.astype(int_dtype)
def test_astype_str(self):
    """astype(str) formats datetimelike, numeric and NaN values (GH 9757)."""
    a = Series(date_range('2010-01-04', periods=5))
    b = Series(date_range('3/6/2012 00:00', periods=5, tz='US/Eastern'))
    c = Series([Timedelta(x, unit='d') for x in range(5)])
    d = Series(range(5))
    e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
    df = DataFrame({'a': a, 'b': b, 'c': c, 'd': d, 'e': e})
    # datetimelike
    # Test str and unicode on python 2.x and just str on python 3.x
    for tt in set([str, compat.text_type]):
        result = df.astype(tt)
        # Expected strings are built from pandas' own repr helpers so
        # the formatting matches astype's output exactly.
        expected = DataFrame({
            'a': list(map(tt, map(lambda x: Timestamp(x)._date_repr,
                                  a._values))),
            'b': list(map(tt, map(Timestamp, b._values))),
            'c': list(map(tt, map(lambda x: Timedelta(x)
                                  ._repr_base(format='all'), c._values))),
            'd': list(map(tt, d._values)),
            'e': list(map(tt, e._values)),
        })
        assert_frame_equal(result, expected)
    # float/nan
    # 11302
    # consistency in astype(str)
    for tt in set([str, compat.text_type]):
        result = DataFrame([np.NaN]).astype(tt)
        expected = DataFrame(['nan'])
        assert_frame_equal(result, expected)
        result = DataFrame([1.12345678901234567890]).astype(tt)
        if _np_version_under1p14:
            # < 1.14 truncates
            expected = DataFrame(['1.12345678901'])
        else:
            # >= 1.14 preserves the full repr
            expected = DataFrame(['1.1234567890123457'])
        assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
    """astype accepts a dict/Series mapping column -> dtype (GH 7271, GH 16717).

    Checks partial casts, full casts, bad keys, and the empty mapping,
    and verifies the source frame is never mutated.
    """
    a = Series(date_range('2010-01-04', periods=5))
    b = Series(range(5))
    c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
    d = Series(['1.0', '2', '3.14', '4', '5.4'])
    df = DataFrame({'a': a, 'b': b, 'c': c, 'd': d})
    original = df.copy(deep=True)
    # change type of a subset of columns
    dt1 = dtype_class({'b': 'str', 'd': 'float32'})
    result = df.astype(dt1)
    expected = DataFrame({
        'a': a,
        'b': Series(['0', '1', '2', '3', '4']),
        'c': c,
        'd': Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype='float32')})
    assert_frame_equal(result, expected)
    assert_frame_equal(df, original)
    dt2 = dtype_class({'b': np.float32, 'c': 'float32', 'd': np.float64})
    result = df.astype(dt2)
    expected = DataFrame({
        'a': a,
        'b': Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype='float32'),
        'c': Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype='float32'),
        'd': Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype='float64')})
    assert_frame_equal(result, expected)
    assert_frame_equal(df, original)
    # change all columns
    dt3 = dtype_class({'a': str, 'b': str, 'c': str, 'd': str})
    assert_frame_equal(df.astype(dt3),
                       df.astype(str))
    assert_frame_equal(df, original)
    # error should be raised when using something other than column labels
    # in the keys of the dtype dict
    dt4 = dtype_class({'b': str, 2: str})
    dt5 = dtype_class({'e': str})
    pytest.raises(KeyError, df.astype, dt4)
    pytest.raises(KeyError, df.astype, dt5)
    assert_frame_equal(df, original)
    # if the dtypes provided are the same as the original dtypes, the
    # resulting DataFrame should be the same as the original DataFrame
    dt6 = dtype_class({col: df[col].dtype for col in df.columns})
    equiv = df.astype(dt6)
    assert_frame_equal(df, equiv)
    assert_frame_equal(df, original)
    # GH 16717
    # if dtypes provided is empty, the resulting DataFrame
    # should be the same as the original DataFrame
    dt7 = dtype_class({})
    result = df.astype(dt7)
    # BUG FIX: previously this asserted against `equiv` (leftover from the
    # dt6 case above) and never checked `result`, so the empty-mapping
    # behavior was not actually verified.
    assert_frame_equal(result, original)
    assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
    """astype works on frames with duplicate column labels."""
    # Two columns both named 'a', with 'b' between them.
    a1 = Series([1, 2, 3, 4, 5], name='a')
    b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name='b')
    a2 = Series([0, 1, 2, 3, 4], name='a')
    df = concat([a1, b, a2], axis=1)
    # Casting everything to str touches both 'a' columns.
    result = df.astype(str)
    a1_str = Series(['1', '2', '3', '4', '5'], dtype='str', name='a')
    b_str = Series(['0.1', '0.2', '0.4', '0.6', '0.8'], dtype=str,
                   name='b')
    a2_str = Series(['0', '1', '2', '3', '4'], dtype='str', name='a')
    expected = concat([a1_str, b_str, a2_str], axis=1)
    assert_frame_equal(result, expected)
    # A dict keyed on 'a' must cast both duplicate columns, leaving 'b'.
    result = df.astype({'a': 'str'})
    expected = concat([a1_str, b, a2_str], axis=1)
    assert_frame_equal(result, expected)
@pytest.mark.parametrize("cls", [
    pd.api.types.CategoricalDtype,
    pd.api.types.DatetimeTZDtype,
    pd.api.types.IntervalDtype
])
def test_astype_categoricaldtype_class_raises(self, cls):
    """Passing an extension-dtype *class* (not an instance) must raise."""
    df = DataFrame({"A": ['a', 'a', 'b', 'c']})
    xpr = "Expected an instance of {}".format(cls.__name__)
    # Both the frame-level and column-level astype paths must reject it.
    with tm.assert_raises_regex(TypeError, xpr):
        df.astype({"A": cls})
    with tm.assert_raises_regex(TypeError, xpr):
        df['A'].astype(cls)
def test_timedeltas(self):
    """get_dtype_counts tracks datetime64/timedelta64/int columns."""
    df = DataFrame(dict(A=Series(date_range('2012-1-1', periods=3,
                                            freq='D')),
                        B=Series([timedelta(days=i) for i in range(3)])))
    result = df.get_dtype_counts().sort_values()
    expected = Series(
        {'datetime64[ns]': 1, 'timedelta64[ns]': 1}).sort_values()
    assert_series_equal(result, expected)
    # datetime + timedelta yields another datetime column.
    df['C'] = df['A'] + df['B']
    expected = Series(
        {'datetime64[ns]': 2, 'timedelta64[ns]': 1}).sort_values()
    result = df.get_dtype_counts().sort_values()
    assert_series_equal(result, expected)
    # mixed int types
    df['D'] = 1
    expected = Series({'datetime64[ns]': 2,
                       'timedelta64[ns]': 1,
                       'int64': 1}).sort_values()
    result = df.get_dtype_counts().sort_values()
    assert_series_equal(result, expected)
def test_arg_for_errors_in_astype(self):
    """astype validates the ``errors`` argument (GH 14878)."""
    df = DataFrame([1, 2, 3])
    # Non-string values for errors are rejected.
    with pytest.raises(ValueError):
        df.astype(np.float64, errors=True)
    # The legacy raise_on_error keyword is deprecated but still works.
    with tm.assert_produces_warning(FutureWarning):
        df.astype(np.int8, raise_on_error=False)
    # 'ignore' is a valid value and must not warn or raise.
    df.astype(np.int8, errors='ignore')
class TestDataFrameDatetimeWithTZ(TestData):
    """Tests for frames mixing tz-naive and tz-aware datetime columns.

    Relies on the ``tzframe`` fixture from TestData: columns A (naive),
    B (US/Eastern) and C (CET), with NaT in row 1 of B and C.
    """

    def test_interleave(self):
        """.values interleaves tz-aware columns as object Timestamps."""
        # interleave with object
        result = self.tzframe.assign(D='foo').values
        expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                              Timestamp('2013-01-02 00:00:00'),
                              Timestamp('2013-01-03 00:00:00')],
                             [Timestamp('2013-01-01 00:00:00-0500',
                                        tz='US/Eastern'),
                              pd.NaT,
                              Timestamp('2013-01-03 00:00:00-0500',
                                        tz='US/Eastern')],
                             [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                              pd.NaT,
                              Timestamp('2013-01-03 00:00:00+0100', tz='CET')],
                             ['foo', 'foo', 'foo']], dtype=object).T
        tm.assert_numpy_array_equal(result, expected)
        # interleave with only datetime64[ns]
        result = self.tzframe.values
        expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                              Timestamp('2013-01-02 00:00:00'),
                              Timestamp('2013-01-03 00:00:00')],
                             [Timestamp('2013-01-01 00:00:00-0500',
                                        tz='US/Eastern'),
                              pd.NaT,
                              Timestamp('2013-01-03 00:00:00-0500',
                                        tz='US/Eastern')],
                             [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                              pd.NaT,
                              Timestamp('2013-01-03 00:00:00+0100',
                                        tz='CET')]], dtype=object).T
        tm.assert_numpy_array_equal(result, expected)

    def test_astype(self):
        """astype(object) keeps tz; astype('datetime64[ns]') converts to UTC."""
        # astype
        expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                              Timestamp('2013-01-02 00:00:00'),
                              Timestamp('2013-01-03 00:00:00')],
                             [Timestamp('2013-01-01 00:00:00-0500',
                                        tz='US/Eastern'),
                              pd.NaT,
                              Timestamp('2013-01-03 00:00:00-0500',
                                        tz='US/Eastern')],
                             [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                              pd.NaT,
                              Timestamp('2013-01-03 00:00:00+0100',
                                        tz='CET')]],
                            dtype=object).T
        result = self.tzframe.astype(object)
        assert_frame_equal(result, DataFrame(
            expected, index=self.tzframe.index, columns=self.tzframe.columns))
        # Casting to naive datetime64[ns] converts via UTC then drops tz.
        result = self.tzframe.astype('datetime64[ns]')
        expected = DataFrame({'A': date_range('20130101', periods=3),
                              'B': (date_range('20130101', periods=3,
                                               tz='US/Eastern')
                                    .tz_convert('UTC')
                                    .tz_localize(None)),
                              'C': (date_range('20130101', periods=3,
                                               tz='CET')
                                    .tz_convert('UTC')
                                    .tz_localize(None))})
        # NaT entries survive the conversion.
        expected.iloc[1, 1] = pd.NaT
        expected.iloc[1, 2] = pd.NaT
        assert_frame_equal(result, expected)

    def test_astype_str(self):
        """astype(str) renders tz-aware values with their UTC offsets."""
        # str formatting
        result = self.tzframe.astype(str)
        expected = DataFrame([['2013-01-01', '2013-01-01 00:00:00-05:00',
                               '2013-01-01 00:00:00+01:00'],
                              ['2013-01-02', 'NaT', 'NaT'],
                              ['2013-01-03', '2013-01-03 00:00:00-05:00',
                               '2013-01-03 00:00:00+01:00']],
                             columns=self.tzframe.columns)
        tm.assert_frame_equal(result, expected)
        # The frame's own repr uses the same formatting.
        result = str(self.tzframe)
        assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 '
                '2013-01-01 00:00:00+01:00') in result
        assert ('1 2013-01-02 '
                'NaT NaT') in result
        assert ('2 2013-01-03 2013-01-03 00:00:00-05:00 '
                '2013-01-03 00:00:00+01:00') in result
|
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014-2015 Develer S.r.L.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import argparse
import fnmatch
import glob
import multiprocessing
import os
import os.path
import shutil
import sys
import sdk
#
# Paths
#
# True on any Windows platform (sys.platform is e.g. 'win32').
iswin = sys.platform.startswith("win")
# Directory containing this script; resources are resolved relative to it.
HERE = os.path.abspath(os.path.dirname(__file__))
HOME = os.path.expanduser('~')
# Optional commercial license files; if present, build_qt/build_pyqt
# switch to the commercial editions.
PYQT_LICENSE_FILE = os.path.join(HERE, 'pyqt-commercial.sip')
QT_LICENSE_FILE = os.path.join(HERE, 'qt-license.txt')
# Helper files (mkspecs, stdint-msvc.h) used by the Windows build.
SUPPORT_DIR = os.path.join(HERE, 'support')
# Suffix for native executables: '.exe' on Windows, empty elsewhere.
EXECUTABLE_EXT = ".exe" if iswin else ""
def check_bash():
    """Abort the build with a clear message if 'bash' cannot be executed.

    Needed on Windows, where the ICU build runs through bash/Cygwin.
    """
    try:
        sdk.sh("bash", "--version")
    # BUG FIX: a bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors from the subprocess call.
    except Exception:
        sdk.die("ERROR: unable to run 'bash', check your PATH")
def main():
    """Drive the full SDK build: plan, prep, build, merge, install."""
    args = parse_command_line()
    # Prepare the build plan
    # plan :: (component_name, build_function, abs_source_directory_path)
    plan = []
    def add_to_plan(plan, component_name, build_f, source_directory):
        plan.append((component_name, build_f, source_directory))
    # Build order matters: ICU before Qt, sip before PyQt.
    add_to_plan(plan, 'icu', build_icu, args.with_icu_sources)
    add_to_plan(plan, 'qt', build_qt, args.with_qt_sources)
    add_to_plan(plan, 'sip', build_sip, args.with_sip_sources)
    add_to_plan(plan, 'pyqt', build_pyqt, args.with_pyqt_sources)
    # If user specified some packages on the command line, build only those
    if args.packages != 'all':
        plan = [entry for entry in plan if entry[0] in args.packages]
    # Get this installation's layout
    layout = sdk.get_layout(sdk.platform_root(args.install_root))
    # Setup build environment
    prep(layout)
    # --only-merge stops the build here.
    if args.only_merge:
        merge(layout)
        return
    # --shell stops the build here.
    if args.shell:
        sdk.start_subshell()
        return
    # --only-scripts stops the build here.
    if args.only_scripts:
        install_scripts(args.install_root)
        return
    # Build
    build(plan, layout, args.debug, args.profile)
    merge(layout)
    install_scripts(args.install_root)
def parse_command_line():
    """Parse the build script's arguments and auto-discover source trees.

    Returns the argparse namespace; dies early when a profile is missing
    but required to rebuild Qt.
    """
    args_parser = argparse.ArgumentParser()

    def check_source_dir(glob_pattern):
        # Look for exactly one source directory matching glob_pattern,
        # under ./sources and under the platform-specific sources root.
        sdk.print_box("Sources discovery for %r..." % glob_pattern)
        sources_pattern = os.path.join(HERE, 'sources', glob_pattern)
        sources_pattern_platform = os.path.join(sdk.platform_root('sources'), glob_pattern)
        globs = glob.glob(sources_pattern) + glob.glob(sources_pattern_platform)
        candidates = [d for d in globs if os.path.isdir(d)]
        if len(candidates) == 1:
            return candidates[0]
        elif len(candidates) > 1:
            # BUG FIX: the exception was constructed but never raised,
            # silently returning None instead of reporting the ambiguity.
            raise argparse.ArgumentTypeError(
                "Too many candidates for %s: %s" % (glob_pattern, ", ".join(candidates)))
        else:
            # BUG FIX: same here for the not-found case.
            raise argparse.ArgumentTypeError("%r not found, provide an existing folder" % glob_pattern)

    args_parser.add_argument('-d', '--debug', action='store_true')
    args_parser.add_argument('-k', '--shell', action='store_true',
                             help="starts a shell just before starting the build")
    args_parser.add_argument('-m', '--only-merge', action='store_true',
                             help="Merge user provided files from ./merge")
    args_parser.add_argument('-n', '--only-scripts', action='store_true',
                             help='Skip build step, update install scripts only')
    args_parser.add_argument('-p', '--profile', type=sdk.maybe(sdk.ajson, {}),
                             help="json config file for Qt build")
    args_parser.add_argument('-r', '--install-root', help="default: %(default)s", type=sdk.mkdir,
                             default=os.path.join(HERE, '_out'))
    args_parser.add_argument('-c', '--with-icu-sources', type=sdk.adir)
    args_parser.add_argument('-t', '--with-pyqt-sources', type=sdk.adir)
    args_parser.add_argument('-q', '--with-qt-sources', type=sdk.adir)
    args_parser.add_argument('-s', '--with-sip-sources', type=sdk.adir)
    args_parser.add_argument('packages', metavar='PACKAGES', nargs='*',
                             choices=['all', 'icu', 'qt', 'sip', 'pyqt'], default='all',
                             help="Build only selected packages from {%(choices)s}, default: %(default)s")
    args = args_parser.parse_args()

    def has_package(pkg):
        # True when pkg was requested explicitly or via the 'all' default.
        return (pkg in args.packages or "all" in args.packages)

    # Auto-discover any source tree not given explicitly on the command line.
    if args.with_icu_sources is None:
        args.with_icu_sources = check_source_dir('icu*')
    if args.with_pyqt_sources is None:
        args.with_pyqt_sources = check_source_dir('PyQt-*')
    if args.with_qt_sources is None:
        args.with_qt_sources = check_source_dir('qt-everywhere-*')
    if args.with_sip_sources is None:
        args.with_sip_sources = check_source_dir('sip-*')
    # ICU on Windows builds through bash/Cygwin, so verify bash works.
    if has_package("icu"):
        if sys.platform == 'win32':
            check_bash()
    # to rebuild Qt.
    if has_package("qt"):
        if not args.profile:
            sdk.die('I need a profile in to rebuild Qt!')
    return args
def prep(layout):
    """Create the install tree and export the build environment."""
    make_install_root_skel(layout)
    # NOTE(review): __import__('configure') imports the sibling
    # configure.py module; presumably written this way to avoid a static
    # import of a file that is also shipped into the install root - confirm.
    sdk_configure = __import__('configure')
    sdk_configure.setup_environment(layout)
def make_install_root_skel(layout):
    """Create every directory listed in *layout* that does not yet exist."""
    for directory in layout.values():
        if os.path.isdir(directory):
            continue
        os.makedirs(directory)
def build(recipes, layout, debug, profile):
    """Run each (name, build_function, source_dir) recipe in order.

    Each build function is invoked with the cwd set to its source tree.
    """
    for pkg, build_f, src_dir in recipes:
        sdk.print_box('Building %s' % pkg, src_dir)
        with sdk.chdir(src_dir):
            build_f(layout, debug, profile)
def merge(layout):
    """Overlay user-provided files from ./merge onto the install root."""
    merge_dir = os.path.join(HERE, 'merge')
    if not os.path.isdir(merge_dir):
        print('No files to merge.')
        return
    sdk.print_box('Merging %s' % merge_dir, 'into', layout['root'])
    sdk.copy_tree(merge_dir, layout['root'])
def install_scripts(install_root):
    """Copy configure.py and sdk.py alongside the installed SDK."""
    sdk.print_box('Installing configure.py and sdk.py to:', install_root)
    for script_name in ('configure.py', 'sdk.py'):
        shutil.copyfile(os.path.join(HERE, script_name),
                        os.path.join(install_root, script_name))
#
# Build recipes
# Function prototype: def f(layout, debug, profile) :: dict -> bool -> dict
#
def build_icu(layout, debug, profile):
    """Configure, build and install ICU into layout['root'].

    Expects the cwd to be the ICU source tree (see build()).
    """
    # NOTE: We always build ICU in release mode since we don't usually need to debug it.
    os.chdir('source')
    if sys.platform == 'darwin':
        # Tarballs may lose the executable bit on the configure scripts.
        sdk.sh('chmod', '+x', 'configure', 'runConfigureICU')
        sdk.sh('bash', 'runConfigureICU', 'MacOSX', '--prefix=%s' %
               layout['root'], '--disable-debug', '--enable-release')
        sdk.sh('make')
        sdk.sh('make', 'install')
    elif sys.platform == 'linux2':
        sdk.sh('chmod', '+x', 'configure', 'runConfigureICU')
        sdk.sh('bash', 'runConfigureICU', 'Linux', '--prefix=%s' %
               layout['root'], '--disable-debug', '--enable-release')
        sdk.sh('make')
        sdk.sh('make', 'install')
    elif sys.platform == 'win32':
        # Convert native install_root path to one accepted by Cygwin (e.g.: /cygdrive/c/foo/bar)
        cy_install_root = layout['root'].replace('\\', '/')
        cy_install_root = cy_install_root.replace('C:/', '/cygdrive/c/')
        sdk.sh('bash', 'runConfigureICU', 'Cygwin/MSVC', '--prefix=%s' %
               cy_install_root, '--disable-debug', '--enable-release')
        sdk.sh('bash', '-c', 'make')  # We have to use GNU make here, so no make() wrapper...
        sdk.sh('bash', '-c', 'make install')
    else:
        sdk.die('You have to rebuild ICU only on OS X or Windows')
def install_qt_requirements():
    """On Ubuntu, apt-get install the -dev packages Qt's configure needs."""
    def running_on_ubuntu():
        # /etc/lsb-release identifies the distribution on Debian derivatives.
        if not os.path.exists("/etc/lsb-release"):
            return False
        with open("/etc/lsb-release") as lsb:
            for entry in lsb:
                if entry.startswith("DISTRIB_ID"):
                    return "Ubuntu" in entry
        return False
    if not running_on_ubuntu():
        return
    sdk.sh("sudo", "apt-get", "install", "-y",
           "libfontconfig1-dev",
           "libfreetype6-dev",
           "libx11-dev",
           "libxcursor-dev",
           "libxext-dev",
           "libxfixes-dev",
           "libxft-dev",
           "libxi-dev",
           "libxrandr-dev",
           "libxrender-dev",
           "libgl1-mesa-dev",
           "libglu1-mesa-dev",
           "libcups2-dev",
           "python-dev")
def build_qt(layout, debug, profile):
    """Configure, build and install Qt according to *profile*.

    Expects the cwd to be the Qt source tree. *profile* must contain a
    'qt' section with 'common' args, optional per-platform args, and a
    'version' entry (used for the Windows debug mkspec file name).
    """
    def qtmake(*args):
        # Prefer jom (parallel nmake replacement) when available;
        # fall back to the regular make() wrapper otherwise.
        if sys.platform == 'win32':
            make(*args)
        else:
            try:
                sdk.sh('jom', '/VERSION')
            except:
                make(*args)
            else:
                sdk.sh('jom', '-j%s' % str(multiprocessing.cpu_count() + 1), *args)
    # Presence of the license file switches to the commercial edition.
    if os.path.isfile(QT_LICENSE_FILE):
        qt_license = '-commercial'
        shutil.copy(QT_LICENSE_FILE, os.path.join(HOME, ".qt-license"))
    else:
        qt_license = '-opensource'
    # Bootstrap configure.exe on Windows so that we can re-use the UNIX source
    # tarball which doesn't have configure.exe pre-built like the Win32
    # version. To do this, we 'touch' qtbase\.gitignore.
    if is_qt5():
        with open(os.path.join('qtbase', '.gitignore'), 'w'):
            pass
    # Configure
    qt_configure_args = [
        '-confirm-license',
        '-prefix', layout['root'],
        '-shared',
        qt_license
    ]
    # Load build profile
    qt_configure_args.extend(profile['qt']['common'])
    if sys.platform in profile['qt']:
        qt_configure_args.extend(profile['qt'][sys.platform])
    # Enable proper release + debug .pdb files on Windows
    if debug:
        if sys.platform == 'win32':
            # Swap in a custom mkspec that builds release with debug info.
            mkspec_file_name = 'qt%s-msvc2008-release-with-debuginfo.conf' % profile[
                'qt']['version']
            shutil.copyfile(os.path.join(SUPPORT_DIR, mkspec_file_name), os.path.join(
                'mkspecs', 'win32-msvc2008', 'qmake.conf'))
            qt_configure_args.append('-release')
        else:
            qt_configure_args.append('-debug')
    else:
        qt_configure_args.append('-release')
    # Have the compiler find our local copy of ICU
    if sys.platform == 'darwin' or sys.platform == 'win32':
        qt_configure_args.extend(['-I', os.path.join(layout['root'], 'include')])
        qt_configure_args.extend(['-L', os.path.join(layout['root'], 'lib')])
    if sys.platform == 'win32':
        # VC++ doesn't have stdint.h (required by WebKit)
        shutil.copy(os.path.join(SUPPORT_DIR, 'stdint-msvc.h'),
                    os.path.join(layout['include'], 'stdint.h'))
        # Add gnuwin32 to the PATH (required by WebKit)
        qt_source_dir = os.path.abspath(os.getcwd())
        os.environ['PATH'] = os.pathsep.join([
            os.path.join(qt_source_dir, 'gnuwin32', 'bin'),
            os.environ['PATH']
        ])
        # Enable parallel build
        qt_configure_args.append('-mp')
    # Build Qt 4 with clang on OS X
    if sys.platform == 'darwin' and os.path.isfile('/usr/bin/clang') and not is_qt5():
        qt_configure_args.extend(['-platform', 'unsupported/macx-clang'])
    # Install build requirements (Ubuntu only)
    install_qt_requirements()
    # Build
    configure_qt(*qt_configure_args)
    qtmake()
    qtmake('install')
    # Delete all libtool's .la files
    for root, _, filenames in os.walk(layout['root']):
        for filename in fnmatch.filter(filenames, '*.la'):
            os.remove(os.path.join(root, filename))
def build_sip(layout, debug, profile):
    """Configure, build and install sip into the layout directories."""
    configure_args = [
        '--bindir', layout['bin'],
        '--destdir', layout['python'],
        '--incdir', layout['include'],
        '--sipdir', layout['sip'],
    ]
    # Appends the platform-appropriate debug flags when debug is True.
    set_pyqt_debug_flags(debug, configure_args)
    configure(*configure_args)
    make()
    make('install')
def build_pyqt(layout, debug, profile):
    """Configure, build and install PyQt using configure-ng.py.

    A commercial license sip file is copied into the source tree when
    present. The legacy configure.py argument list is kept (commented
    out below) alongside the configure-ng.py one actually used.
    """
    if os.path.isfile(PYQT_LICENSE_FILE):
        shutil.copyfile(PYQT_LICENSE_FILE, os.path.join('sip', 'pyqt-commercial.sip'))
    # Configure
    configure_args = [
        '--assume-shared',
        '--bindir', layout['bin'],
        '--concatenate',
        '--concatenate-split=4',
        '--confirm-license',
        '--destdir', layout['python'],
        '--no-docstrings',
        '--sip', layout['sip'],
        '--verbose',
    ] + profile['pyqt']['common']
    # Configure-ng
    configure_ng_args = [
        '--assume-shared',
        '--bindir', layout['bin'],
        '--concatenate',
        '--concatenate-split=4',
        '--confirm-license',
        '--destdir', layout['python'],
        '--no-docstrings',
        # configure-ng takes the sip *executable*, not the sip dir.
        '--sip', os.path.join(layout['bin'], 'sip'+EXECUTABLE_EXT),
        '--sipdir', layout['sip'],
        '--verbose',
    ] + profile['pyqt']['common']
    set_pyqt_debug_flags(debug, configure_args)
    set_pyqt_debug_flags(debug, configure_ng_args)
    # Build
    # configure(*configure_args)
    configure_ng(*configure_ng_args)
    make()
    make('install')
#
# Utility methods
#
def is_qt5():
    """Detect Qt 5 sources: only Qt 5 trees have a qtbase/ subdirectory."""
    qtbase_dir = os.path.join('.', 'qtbase')
    return os.path.isdir(qtbase_dir)
def configure(*args):
    """Run configure.py of the current source tree with this interpreter."""
    sdk.sh(sys.executable, 'configure.py', *args)
def configure_ng(*args):
    """Run configure-ng.py (PyQt's newer configure) with this interpreter."""
    sdk.sh(sys.executable, 'configure-ng.py', *args)
def configure_qt(*args):
    """Run the Qt configure step appropriate for the current platform."""
    if sys.platform != 'win32':
        launcher = './configure'
    elif is_qt5():
        # Qt 5 ships configure.bat; Qt 4 ships a prebuilt configure.exe.
        launcher = 'configure.bat'
    else:
        launcher = 'configure.exe'
    sdk.sh(launcher, *args)
def make(*args):
    """Invoke the build tool: nmake on Windows, parallel make elsewhere."""
    if sys.platform == 'win32':
        sdk.sh('nmake', *args)
        return
    # One job more than the CPU count keeps the pipeline busy during I/O.
    jobs = multiprocessing.cpu_count() + 1
    sdk.sh('make', '-j%s' % str(jobs), *args)
def set_pyqt_debug_flags(debug, configure_args):
    """Append debug-build flags to *configure_args* in place.

    No-op when *debug* is false. On Windows this requests optimized
    builds with .pdb debug info; elsewhere it passes --debug.
    """
    if not debug:
        return
    if sys.platform == 'win32':
        configure_args.extend([
            'CFLAGS=/O2 /Zi',
            'CXXFLAGS=/O2 /Zi',
            'LFLAGS=/DEBUG /INCREMENTAL:NO /OPT:REF',
        ])
    else:
        configure_args.append('--debug')
#
# Entry point
#
# Run the full SDK build only when executed as a script.
if __name__ == '__main__':
    main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-GPU tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.contrib.distribute.python import values
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import device_util
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import server_lib
# True when this binary was invoked as the GPU test variant (its file name
# contains "test_gpu"); gates the GPU-only test cases below.
GPU_TEST = "test_gpu" in sys.argv[0]
class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase):
  """Basic MirroredStrategy tests on a two-device (CPU+GPU or GPU+GPU) setup."""
  def _get_distribution_strategy(self):
    """Returns a two-device MirroredStrategy, preferring two GPUs if present."""
    devices = ["/device:CPU:0", "/device:GPU:0"]
    if GPU_TEST:
      # In the GPU-only test binary we require at least one GPU, and use two
      # GPUs when the machine has them.
      self.assertGreater(context.num_gpus(), 0)
      if context.num_gpus() > 1:
        devices = ["/device:GPU:0", "/device:GPU:1"]
    print(self.id().split(".")[-1], "devices:", ", ".join(devices))
    return mirrored_strategy.MirroredStrategy(devices)
  def testMinimizeLossEager(self):
    """Loss minimization in eager mode (exercised via the test base class)."""
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    self._test_minimize_loss_eager(self._get_distribution_strategy())
  def testMinimizeLossGraph(self):
    """Loss minimization in graph mode (exercised via the test base class)."""
    # Soft placement is needed when running the CPU-only variant so GPU ops
    # can fall back to the CPU.
    soft_placement = not GPU_TEST
    print("testMinimizeLossGraph soft_placement:", soft_placement)
    self._test_minimize_loss_graph(
        self._get_distribution_strategy(), soft_placement=soft_placement)
  def testMapReduce(self):
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    self._test_map_reduce(self._get_distribution_strategy())
  def testDeviceIndex(self):
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    self._test_device_index(self._get_distribution_strategy())
  def testTowerId(self):
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    self._test_tower_id(self._get_distribution_strategy())
  def testNumTowers(self):
    # The strategy built above always mirrors across exactly two devices.
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    self.assertEqual(2, self._get_distribution_strategy().num_towers)
  @test_util.run_in_graph_and_eager_modes
  def testCallAndMergeExceptions(self):
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    self._test_call_and_merge_exceptions(self._get_distribution_strategy())
  @test_util.run_in_graph_and_eager_modes
  def testRunRegroupError(self):
    """_regroup() should fail when towers return lists of differing lengths."""
    def run_fn(device_id):
      # Generates a list with different lengths on different devices.
      # Will fail in _regroup() (if more than one device).
      return list(range(device_id))
    dist = self._get_distribution_strategy()
    with dist.scope(), self.assertRaises(AssertionError):
      dist.call_for_each_tower(run_fn, dist.worker_device_index)
  @test_util.run_in_graph_and_eager_modes
  def testReduceToCpu(self):
    """SUM-reducing per-tower device ids onto the CPU yields their total."""
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    def run_fn(device_id):
      return device_id
    dist = self._get_distribution_strategy()
    with dist.scope():
      result = dist.call_for_each_tower(run_fn, dist.worker_device_index)
      reduced = dist.reduce(
          variable_scope.VariableAggregation.SUM,
          result,
          destinations="/device:CPU:0")
      unwrapped = dist.unwrap(reduced)
      # A single destination gives a single unwrapped value.
      self.assertEqual(1, len(unwrapped))
      # Sum of the device ids 0..num_devices-1.
      expected = sum(range(len(dist.worker_devices)))
      self.assertEqual(expected, self.evaluate(unwrapped[0]))
  @test_util.run_in_graph_and_eager_modes
  def testReduceOnlyFirstTowerUpdates(self):
    """ONLY_FIRST_TOWER reduction keeps just tower 0's value (3 + 5*0 = 3)."""
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    def run_fn(device_id):
      return constant_op.constant(3 + 5 * device_id)
    dist = self._get_distribution_strategy()
    with dist.scope():
      result = dist.call_for_each_tower(run_fn, dist.worker_device_index)
      reduced = dist.reduce(
          variable_scope.VariableAggregation.ONLY_FIRST_TOWER,
          result,
          destinations="/device:CPU:0")
      unwrapped = dist.unwrap(reduced)
      self.assertEqual(1, len(unwrapped))
      self.assertEqual(3, self.evaluate(unwrapped[0]))
  @test_util.run_in_graph_and_eager_modes()
  def testReduceToMultipleDestinations(self):
    """A reduce can broadcast its result to multiple destination devices."""
    if not GPU_TEST:
      self.skipTest("Not GPU test")
    devices = ["/device:GPU:0"]
    if GPU_TEST:
      self.assertGreater(context.num_gpus(), 0)
    print(self.id().split(".")[-1], "devices:", ", ".join(devices))
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      reduced = dist.reduce(
          variable_scope.VariableAggregation.SUM,
          1.0,
          destinations=["/device:CPU:0", "/device:GPU:0"])
      unwrapped = dist.unwrap(reduced)
      # One unwrapped value per destination device.
      self.assertEqual(2, len(unwrapped))
      self.assertEqual(1.0, self.evaluate(unwrapped[0]))
class MirroredStrategyVariableCreationTest(test.TestCase):
  """Tests variable creation and naming inside `call_for_each_tower`.

  Uses the deprecated-alias-free `assertEqual`/`assertIsInstance` forms
  (`assertEquals` and `assertTrue(isinstance(...))` are replaced; the latter
  also produces clearer failure messages). `assertRaisesRegexp` is kept for
  Python 2 compatibility.
  """
  # Shared session config; soft placement lets CPU-only runs place GPU ops.
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  def _skip_eager_if_gpus_less_than(self, num_gpus):
    """Skips the test in eager mode when fewer than `num_gpus` GPUs exist."""
    if context.num_gpus() < num_gpus and context.executing_eagerly():
      self.skipTest("Enough GPUs not available for this test in eager mode.")
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testSingleVariable(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      # This variable should be created only once across the threads because of
      # special variable_creator functions used by `dist.call_for_each_tower`.
      v = variable_scope.variable(1.0, name="foo")
      distribution_strategy_context.get_tower_context().merge_call(lambda _: _)
      return v
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      self.assertIsInstance(result, values.MirroredVariable)
      self.assertEqual("foo:0", result.name)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testUnnamedVariable(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      v = variable_scope.variable(1.0)
      distribution_strategy_context.get_tower_context().merge_call(lambda _: _)
      return v
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      self.assertIsInstance(result, values.MirroredVariable)
      # Default name of "Variable" will be used.
      self.assertEqual("Variable:0", result.name)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testMultipleVariables(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      vs = []
      for i in range(5):
        vs.append(variable_scope.variable(1.0, name="foo" + str(i)))
      distribution_strategy_context.get_tower_context().merge_call(lambda _: _)
      return vs
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      for i, v in enumerate(result):
        self.assertIsInstance(v, values.MirroredVariable)
        self.assertEqual("foo" + str(i) + ":0", v.name)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testMultipleVariablesWithSameCanonicalName(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      vs = []
      vs.append(variable_scope.variable(1.0, name="foo/bar"))
      vs.append(variable_scope.variable(1.0, name="foo_1/bar"))
      vs.append(variable_scope.variable(1.0, name="foo_1/bar_1"))
      vs.append(variable_scope.variable(1.0, name="foo/bar_1"))
      distribution_strategy_context.get_tower_context().merge_call(lambda _: _)
      return vs
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      for v in result:
        self.assertIsInstance(v, values.MirroredVariable)
      # Explicit names are preserved exactly, even when they collide with
      # names uniquified from earlier collisions.
      self.assertEqual(4, len(result))
      self.assertEqual("foo/bar:0", result[0].name)
      self.assertEqual("foo_1/bar:0", result[1].name)
      self.assertEqual("foo_1/bar_1:0", result[2].name)
      self.assertEqual("foo/bar_1:0", result[3].name)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testVariableWithSameCanonicalNameAcrossThreads(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn(device_id):
      v = variable_scope.variable(1.0, name="foo_" + str(device_id))
      distribution_strategy_context.get_tower_context().merge_call(lambda _: _)
      return v
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      result = dist.call_for_each_tower(
          model_fn, dist.worker_device_index, run_concurrently=False)
      self.assertIsInstance(result, values.MirroredVariable)
      # The resulting mirrored variable will use the name from the first device.
      self.assertEqual("foo_0:0", result.name)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testWithLayers(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn(features):
      with variable_scope.variable_scope("common"):
        layer1 = core.Dense(1)
        layer1(features)
        layer2 = core.Dense(1)
        layer2(features)
        # This will pause the current thread, and execute the other thread.
        distribution_strategy_context.get_tower_context().merge_call(
            lambda _: _)
        layer3 = core.Dense(1)
        layer3(features)
      return [(layer1.kernel, layer1.bias),
              (layer2.kernel, layer2.bias),
              (layer3.kernel, layer3.bias)]
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    features = dist.distribute_dataset(
        lambda: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10)
    ).make_one_shot_iterator().get_next()
    with dist.scope():
      result = dist.call_for_each_tower(
          model_fn, features, run_concurrently=False)
      # Layer variables are uniquified in creation order: dense, dense_1, ...
      suffixes = ["", "_1", "_2"]
      for (kernel, bias), suffix in zip(result, suffixes):
        self.assertIsInstance(kernel, values.MirroredVariable)
        self.assertEqual("common/dense" + suffix + "/kernel:0", kernel.name)
        self.assertIsInstance(bias, values.MirroredVariable)
        self.assertEqual("common/dense" + suffix + "/bias:0", bias.name)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testWithVariableAndVariableScope(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      v0 = variable_scope.variable(1.0, name="var0", aggregation=None)
      with variable_scope.variable_scope("common"):
        v1 = variable_scope.variable(1.0, name="var1")
        # This will pause the current thread, and execute the other thread.
        distribution_strategy_context.get_tower_context().merge_call(
            lambda _: _)
        v2 = variable_scope.variable(
            1.0,
            name="var2",
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        v3 = variable_scope.variable(
            1.0,
            name="var3",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation=variable_scope.VariableAggregation.MEAN)
      return v0, v1, v2, v3
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      v = variable_scope.variable(1.0, name="var-main0")
      self.assertEqual("var-main0:0", v.name)
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      self.assertEqual(4, len(result))
      v0, v1, v2, v3 = result
      self.assertIsInstance(v0, values.MirroredVariable)
      self.assertEqual("var0:0", v0.name)
      self.assertIsInstance(v1, values.MirroredVariable)
      self.assertEqual("common/var1:0", v1.name)
      # ON_READ synchronization produces a TowerLocalVariable instead.
      self.assertIsInstance(v2, values.TowerLocalVariable)
      self.assertEqual("common/var2:0", v2.name)
      self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)
      self.assertIsInstance(v3, values.MirroredVariable)
      self.assertEqual("common/var3:0", v3.name)
      self.assertEqual(variable_scope.VariableAggregation.MEAN, v3.aggregation)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testWithGetVariableAndVariableScope(self):
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      v0 = variable_scope.get_variable("var0", [1])
      with variable_scope.variable_scope("common"):
        v1 = variable_scope.get_variable("var1", [1])
        # This will pause the current thread, and execute the other thread.
        distribution_strategy_context.get_tower_context().merge_call(
            lambda _: _)
        v2 = variable_scope.get_variable(
            "var2", [1],
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        v3 = variable_scope.get_variable(
            "var3", [1],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation=variable_scope.VariableAggregation.MEAN)
      return v0, v1, v2, v3
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      with variable_scope.variable_scope("main"):
        v = variable_scope.get_variable("var-main0", [1])
        self.assertEqual("main/var-main0:0", v.name)
        result = dist.call_for_each_tower(model_fn, run_concurrently=False)
        self.assertEqual(4, len(result))
        v0, v1, v2, v3 = result
        # get_variable picks up the enclosing "main" variable scope.
        self.assertIsInstance(v0, values.MirroredVariable)
        self.assertEqual("main/var0:0", v0.name)
        self.assertIsInstance(v1, values.MirroredVariable)
        self.assertEqual("main/common/var1:0", v1.name)
        self.assertIsInstance(v2, values.TowerLocalVariable)
        self.assertEqual("main/common/var2:0", v2.name)
        self.assertEqual(variable_scope.VariableAggregation.SUM,
                         v2.aggregation)
        self.assertIsInstance(v3, values.MirroredVariable)
        self.assertEqual("main/common/var3:0", v3.name)
        self.assertEqual(variable_scope.VariableAggregation.MEAN,
                         v3.aggregation)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testOnlyFirstTowerUpdatesVariables(self):
    """ONLY_FIRST_TOWER: only tower 0's update is applied to synced vars."""
    self._skip_eager_if_gpus_less_than(1)
    def create_fn():
      aggregation = variable_scope.VariableAggregation.ONLY_FIRST_TOWER
      v0 = variable_scope.variable(
          2.0,
          name="on_read",
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=aggregation)
      v1 = variable_scope.variable(
          3.0,
          name="on_write",
          synchronization=variable_scope.VariableSynchronization.ON_WRITE,
          aggregation=aggregation)
      return v0, v1
    devices = ["/device:GPU:0", "/device:CPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      v0, v1 = dist.call_for_each_tower(create_fn, run_concurrently=False)
      self.evaluate(v0.initializer)
      self.assertEqual(2.0, self.evaluate(v0.get(devices[0])))
      self.assertEqual(2.0, self.evaluate(v0.get(devices[1])))
      self.assertEqual(2.0, self.evaluate(dist.read_var(v0)))
      self.evaluate(v1.initializer)
      self.assertEqual(3.0, self.evaluate(v1.get(devices[0])))
      self.assertEqual(3.0, self.evaluate(v1.get(devices[1])))
      self.assertEqual(3.0, self.evaluate(dist.read_var(v1)))
      # Update using the assign_add member function.
      def update_member_fn(device_id):
        update0 = v0.assign_add(5.0 * (device_id + 1))
        update1 = v1.assign_add(7.0 * (device_id + 1))
        return update0, update1
      update0a, update1a = dist.call_for_each_tower(
          update_member_fn, dist.worker_device_index, run_concurrently=False)
      # Update "sync on read" variable.
      self.evaluate(dist.group(update0a))
      self.assertEqual(2.0 + 5.0, self.evaluate(v0.get(devices[0])))
      # Writes are not synchronized for "sync on read" variables,
      # so device[1] can end up with a different value.
      self.assertEqual(2.0 + 2*5.0, self.evaluate(v0.get(devices[1])))
      # Always reads from device 0.
      self.assertEqual(2.0 + 5.0, self.evaluate(dist.read_var(v0)))
      # Update "sync on write" variable.
      self.evaluate(dist.group(update1a))
      self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[0])))
      # Writes are synchronized for v1, only the argument to assign_add on
      # device[0] is used.
      self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[1])))
      self.assertEqual(3.0 + 7.0, self.evaluate(dist.read_var(v1)))
      # Update using state_ops.assign_add global function.
      def update_state_ops_fn(device_id):
        update0 = state_ops.assign_add(v0, 11.0 * (device_id + 1))
        update1 = state_ops.assign_add(v1, 13.0 * (device_id + 1))
        return update0, update1
      update0b, update1b = dist.call_for_each_tower(
          update_state_ops_fn, dist.worker_device_index, run_concurrently=False)
      self.evaluate(dist.group(update0b))
      # Update "sync on read" variable.
      self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(v0.get(devices[0])))
      self.assertEqual(2.0 + 2*5.0 + 2*11.0, self.evaluate(v0.get(devices[1])))
      self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(dist.read_var(v0)))
      # Update "sync on write" variable.
      self.evaluate(dist.group(update1b))
      self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[0])))
      self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[1])))
      self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(dist.read_var(v1)))
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testNoneSynchronizationWithGetVariable(self):
    """`NONE` synchronization is rejected by get_variable under Mirrored."""
    self._skip_eager_if_gpus_less_than(1)
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      with self.assertRaisesRegexp(
          ValueError, "`NONE` variable synchronization mode is not "
          "supported with `Mirrored` distribution strategy. Please change "
          "the `synchronization` for variable: v"):
        variable_scope.get_variable(
            "v", [1],
            synchronization=variable_scope.VariableSynchronization.NONE)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testNoneSynchronizationWithVariable(self):
    """`NONE` synchronization is rejected by variable() under Mirrored."""
    self._skip_eager_if_gpus_less_than(1)
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      with self.assertRaisesRegexp(
          ValueError, "`NONE` variable synchronization mode is not "
          "supported with `Mirrored` distribution strategy. Please change "
          "the `synchronization` for variable: v"):
        variable_scope.variable(
            1.0,
            name="v",
            synchronization=variable_scope.VariableSynchronization.NONE)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testInvalidSynchronizationWithVariable(self):
    self._skip_eager_if_gpus_less_than(1)
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      with self.assertRaisesRegexp(
          ValueError, "Invalid variable synchronization mode: Invalid for "
          "variable: v"):
        variable_scope.variable(1.0, name="v", synchronization="Invalid")
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testInvalidAggregationWithGetVariable(self):
    self._skip_eager_if_gpus_less_than(1)
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      with self.assertRaisesRegexp(
          ValueError, "Invalid variable aggregation mode: invalid for "
          "variable: v"):
        variable_scope.get_variable(
            "v", [1],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation="invalid")
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testInvalidAggregationWithVariable(self):
    self._skip_eager_if_gpus_less_than(1)
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      with self.assertRaisesRegexp(
          ValueError, "Invalid variable aggregation mode: invalid for "
          "variable: v"):
        variable_scope.variable(
            1.0,
            name="v",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation="invalid")
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testThreeDevices(self):
    self._skip_eager_if_gpus_less_than(2)
    def model_fn():
      v = variable_scope.variable(1.0, name="foo")
      distribution_strategy_context.get_tower_context().merge_call(lambda _: _)
      return v
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"])
    with dist.scope():
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      self.assertIsInstance(result, values.MirroredVariable)
      self.assertEqual("foo:0", result.name)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testNonMatchingVariableCreation(self):
    """Creating differently named variables per tower is an error."""
    self._skip_eager_if_gpus_less_than(1)
    def model_fn(name):
      v = variable_scope.variable(1.0, name=name)
      distribution_strategy_context.get_tower_context().merge_call(lambda _: _)
      return v
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      names = values.DistributedValues({
          "/device:CPU:0": "foo",
          "/device:GPU:0": "bar"
      })
      with self.assertRaises(RuntimeError):
        _ = dist.call_for_each_tower(model_fn, names, run_concurrently=False)
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testTowerLocalVariable(self):
    """SUM/MEAN TowerLocalVariables aggregate per-device values on read."""
    self._skip_eager_if_gpus_less_than(1)
    all_v_sum = {}
    all_v_mean = {}
    components_sum = {}
    components_mean = {}
    def model_fn(device_id):
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      v_mean = variable_scope.variable(
          4.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      self.assertIsInstance(v_sum, values.TowerLocalVariable)
      self.assertIsInstance(v_mean, values.TowerLocalVariable)
      updates = [v_sum.assign_add(2.0 + device_id),
                 v_mean.assign(6.0 * device_id)]
      all_v_sum[device_id] = v_sum
      all_v_mean[device_id] = v_mean
      c_sum = v_sum.get()
      c_mean = v_mean.get()
      components_sum[device_id] = c_sum
      components_mean[device_id] = c_mean
      self.assertIsNot(v_sum, c_sum)
      self.assertIsNot(v_mean, c_mean)
      return updates, v_sum, v_mean, c_sum, c_mean
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      # Create "sum" and "mean" versions of TowerLocalVariables.
      ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (
          dist.call_for_each_tower(
              model_fn, dist.worker_device_index, run_concurrently=False))
      # Should see the same wrapping instance in all towers.
      self.assertIs(all_v_sum[0], ret_v_sum)
      self.assertIs(all_v_mean[0], ret_v_mean)
      self.assertIs(all_v_sum[0], all_v_sum[1])
      self.assertIs(all_v_mean[0], all_v_mean[1])
      # Regroup should recover the same wrapper.
      self.assertIs(ret_v_sum, regrouped_sum)
      self.assertIs(ret_v_mean, regrouped_mean)
      self.assertIsNot(components_sum[0], components_sum[1])
      self.assertIsNot(components_mean[0], components_mean[1])
      # Apply updates
      self.evaluate(variables.global_variables_initializer())
      self.evaluate([y for x in ret_ops for y in dist.unwrap(x)])
      expected_sum = 0.0
      expected_mean = 0.0
      for i, d in enumerate(dist.worker_devices):
        # Should see different values on different devices.
        v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())
        v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())
        expected = i + 3.0
        self.assertEqual(expected, v_sum_value)
        expected_sum += expected
        expected = i * 6.0
        self.assertEqual(expected, v_mean_value)
        expected_mean += expected
      expected_mean /= len(dist.worker_devices)
      # Without get(device), should return the value you get by
      # applying the reduction across all towers (whether you use
      # read_var(), get(), or nothing).
      self.assertEqual(expected_sum, self.evaluate(dist.read_var(ret_v_sum)))
      self.assertEqual(expected_mean, self.evaluate(dist.read_var(ret_v_mean)))
      self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))
      self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))
      self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
      self.assertEqual(expected_mean, self.evaluate(ret_v_mean))
  # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
  # testing this in eager mode.
  def testNameScope(self):
    def model_fn():
      with ops.name_scope("foo"):
        a = constant_op.constant(1.0, name="a")
        distribution_strategy_context.get_tower_context().merge_call(
            lambda _: _)
        b = constant_op.constant(1.0, name="b")
      return a, b
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with context.graph_mode(), dist.scope():
      with ops.name_scope("main"):
        result = dist.call_for_each_tower(model_fn, run_concurrently=False)
        self.assertEqual(2, len(result))
        for v, name in zip(result, ["a", "b"]):
          self.assertIsInstance(v, values.DistributedValues)
          v0, v1 = dist.unwrap(v)
          self.assertEqual("main/foo/" + name + ":0", v0.name)
          self.assertEqual("main/tower_1/foo/" + name + ":0", v1.name)
  def testWithDefaultName(self):
    def model_fn():
      with ops.name_scope(None, "foo"):
        a = constant_op.constant(1.0, name="a")
        distribution_strategy_context.get_tower_context().merge_call(
            lambda _: _)
        b = constant_op.constant(2.0, name="b")
      return a, b
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with context.graph_mode(), dist.scope():
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      self.assertEqual(2, len(result))
      for v, name in zip(result, ["a", "b"]):
        self.assertIsInstance(v, values.DistributedValues)
        v0, v1 = dist.unwrap(v)
        self.assertEqual("foo/" + name + ":0", v0.name)
        self.assertEqual("tower_1/foo/" + name + ":0", v1.name)
  # variable_scope.variable() respects name scopes when creating
  # variables. On the other hand variable_scope.get_variable() ignores name
  # scopes when creating variables. We test both methods of creating variables
  # to make sure that we have the same variable names in both cases.
  def testNameScopeWithVariable(self):
    def in_cross_tower(_):
      c = variable_scope.variable(1.0, name="c")
      return c
    def model_fn():
      b = variable_scope.variable(1.0, name="b")
      with ops.name_scope("foo"):
        c = distribution_strategy_context.get_tower_context().merge_call(
            in_cross_tower)
      return b, c
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with context.graph_mode(), dist.scope():
      with ops.name_scope("main"):
        a = variable_scope.variable(1.0, name="a")
        result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = dist.unwrap(a)
      b0, b1 = dist.unwrap(result_b)
      c0, c1 = dist.unwrap(result_c)
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)
  def testNameScopeWithGetVariable(self):
    def in_cross_tower(_):
      c = variable_scope.get_variable("c", [1])
      return c
    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with ops.name_scope("foo"):
        c = distribution_strategy_context.get_tower_context().merge_call(
            in_cross_tower)
      return b, c
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with context.graph_mode(), dist.scope():
      with ops.name_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = dist.unwrap(a)
      b0, b1 = dist.unwrap(result_b)
      c0, c1 = dist.unwrap(result_c)
      # get_variable ignores name scopes entirely.
      self.assertEqual("a:0", a0.name)
      self.assertEqual("a/replica_1:0", a1.name)
      self.assertEqual("b:0", b0.name)
      self.assertEqual("b/replica_1:0", b1.name)
      self.assertEqual("c:0", c0.name)
      self.assertEqual("c/replica_1:0", c1.name)
  def testDynamicRnnVariables(self):
    def model_fn():
      inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
      cell_fw = rnn_cell_impl.LSTMCell(300)
      cell_bw = rnn_cell_impl.LSTMCell(300)
      (outputs, _) = rnn.bidirectional_dynamic_rnn(
          cell_fw,
          cell_bw,
          inputs,
          dtype=dtypes.float32)
      return outputs
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with context.graph_mode(), dist.scope():
      result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      # Two variables are created by the RNN layer.
      self.assertEqual(2, len(result))
      for v in result:
        self.assertIsInstance(v, values.DistributedValues)
        _, v1 = dist.unwrap(v)
        self.assertStartsWith(v1.name, "tower_1/")
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testTowerLocalVariableUpdate(self):
    with context.graph_mode():
      def model_fn():
        v_sum = variable_scope.variable(
            1.0,
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        self.assertIsInstance(v_sum, values.TowerLocalVariable)
        return v_sum
      dist = mirrored_strategy.MirroredStrategy(
          ["/device:GPU:0", "/device:GPU:1"])
      def update(var, value):
        return var.assign(value)
      with dist.scope():
        ret_v_sum = dist.call_for_each_tower(model_fn, run_concurrently=False)
        update_ops = dist.update(ret_v_sum, update, 5.0, grouped=False)
        # Initialize variables.
        self.evaluate(variables.global_variables_initializer())
        # Assert that the aggregated value of the tower local vars is the sum
        # of the individual values before running the update ops.
        self.assertEqual(1.0, self.evaluate(
            ret_v_sum.get(dist._devices[0]).read_value()))
        self.assertEqual(2.0, self.evaluate(ret_v_sum))
        # Apply updates.
        self.evaluate(update_ops)
        # Assert that the aggregated value of the tower local vars is the sum
        # of the individual values after running the update ops.
        self.assertEqual(5.0, self.evaluate(
            ret_v_sum.get(dist._devices[0]).read_value()))
        self.assertEqual(10.0, self.evaluate(ret_v_sum))
class MirroredVariableUpdateTest(test.TestCase):
# The following tests check assign, assign_add and assign_sub on Mirrored
# variables in tower and cross tower context.
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
  def _skip_eager_if_gpus_less_than(self, num_gpus):
    """Skips the test in eager mode when fewer than `num_gpus` GPUs exist."""
    if context.num_gpus() < num_gpus and context.executing_eagerly():
      self.skipTest("Enough GPUs not available for this test in eager mode.")
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testAssignMirroredVarTowerContextWithoutAggregationType(self):
    # Test that we always have an aggregation type set on the mirrored variable
    # if we assign to it in tower mode.
    self._skip_eager_if_gpus_less_than(1)
    def var_fn():
      v = variable_scope.variable(1.0, name="foo")
      return v
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      def model_fn():
        return mirrored_var.assign(5.0)
      # Assigning in tower context without an aggregation must be rejected.
      with self.assertRaisesRegexp(
          ValueError, "You must specify an aggregation method to update a "
          "MirroredVariable in Tower Context."):
        self.evaluate(dist.unwrap(dist.call_for_each_tower(model_fn)))
  @test_util.run_in_graph_and_eager_modes(config=config)
  def testAssignMirroredVarTowerContextWithSum(self):
    # Test that we don't reduce a non-per-device value with the "sum"
    # aggregation type.
    self._skip_eager_if_gpus_less_than(1)
    def var_fn():
      v = variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
      return v
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      def model_fn():
        return mirrored_var.assign(5.0)
      # The same scalar 5.0 is assigned on every tower; SUM cannot aggregate
      # a single (non-per-device) value, so this must raise.
      with self.assertRaisesRegexp(
          ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
          "with the given aggregation VariableAggregation.SUM."):
        self.evaluate(dist.unwrap(dist.call_for_each_tower(model_fn)))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignMirroredVarCrossTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(1.0, name="foo")
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
self.assertEquals(6.0, mirrored_var_result)
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignMirroredVarTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
distribution_strategy_context.get_tower_context().tower_id,
mirrored_var.dtype)
return mirrored_var.assign(value)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(0.5, self.evaluate(mirrored_var))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignMirroredVarTowerContextWithSingleValue(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign(5.0)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(5.0, self.evaluate(mirrored_var))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignAddMirroredVarCrossTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(1.0, name="foo")
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
# read_value == True
mirrored_var_result = self.evaluate(
mirrored_var.assign_add(6.0, read_value=True))
self.assertEquals(7.0, mirrored_var_result)
self.assertEquals(7.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
self.assertEquals(7.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
# read_value == False
self.evaluate(mirrored_var.assign_add(2.0, read_value=False))
self.assertEquals(9.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
self.assertEquals(9.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignAddMirroredVarTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
distribution_strategy_context.get_tower_context().tower_id,
mirrored_var.dtype)
return mirrored_var.assign_add(value)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(1.5, self.evaluate(mirrored_var))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignAddMirroredVarTowerContextWithSingleValue(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign_add(5.0)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(6.0, self.evaluate(mirrored_var))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignSubMirroredVarCrossTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(5.0, name="foo")
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(5.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
self.assertEquals(3.0, mirrored_var_result)
self.assertEquals(3.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
self.assertEquals(3.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignSubMirroredVarTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(5.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
distribution_strategy_context.get_tower_context().tower_id,
mirrored_var.dtype)
return mirrored_var.assign_sub(value)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(4.5, self.evaluate(mirrored_var))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignSubMirroredVarTowerContextWithSingleValue(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(5.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign_sub(1.0)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(4.0, self.evaluate(mirrored_var))
class MirroredAndTowerLocalVariableInitializerTest(test.TestCase):
  """Checks initializer / is_initialized behavior of distributed variables."""
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  def testAssignMirroredVarInitializer(self):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():
      def var_fn():
        v = variable_scope.variable(1.0, name="foo")
        return v
      dist = mirrored_strategy.MirroredStrategy(
          ["/device:GPU:0", "/device:CPU:0"])
      with dist.scope():
        mirrored_var = dist.call_for_each_tower(var_fn)
        self.assertIsInstance(mirrored_var, values.MirroredVariable)
        self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
        self.evaluate(mirrored_var.initializer)
        self.assertTrue(self.evaluate(mirrored_var.is_initialized()))

  def testAssignTowerLocalVarInitializer(self):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():
      def model_fn():
        v_sum = variable_scope.variable(
            1.0,
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(v_sum, values.TowerLocalVariable)
        return v_sum
      dist = mirrored_strategy.MirroredStrategy(
          ["/device:GPU:0", "/device:CPU:0"])
      with dist.scope():
        tower_local_var = dist.call_for_each_tower(model_fn)
        self.assertIsInstance(tower_local_var, values.TowerLocalVariable)
        self.assertFalse(self.evaluate(tower_local_var.is_initialized()))
        self.evaluate(tower_local_var.initializer)
        self.assertTrue(self.evaluate(tower_local_var.is_initialized()))
class TowerLocalVariableAssignTest(test.TestCase):
  """Checks assign + read_var semantics for TowerLocalVariables."""
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  def _skip_eager_if_gpus_less_than(self, num_gpus):
    if context.num_gpus() < num_gpus and context.executing_eagerly():
      self.skipTest("Not enough GPUs available for this test in eager mode.")

  @test_util.run_in_graph_and_eager_modes(config=config)
  def testAssignTowerLocalVarSumAggregation(self):
    """SUM aggregation: reads add tower values; assigns divide evenly."""
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      return v_sum
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      tower_local_var = dist.call_for_each_tower(model_fn,
                                                 run_concurrently=False)
      # assertIsInstance gives a clearer failure message than
      # assertTrue(isinstance(...)).
      self.assertIsInstance(tower_local_var, values.TowerLocalVariable)
      self.evaluate(variables.global_variables_initializer())
      # Each tower has a value of 1.0 assigned to it in tower context.
      # When we read the value using `read_var` we should see the SUM of each of
      # values on each of the towers.
      self.assertEqual(2.0, self.evaluate(dist.read_var(tower_local_var)))
      # Assigning 6.0 in cross tower context will assign a value of
      # 6.0/num_towers to each tower.
      tlv_ops = tower_local_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the tower local var we should get the assigned value back.
      # The value on all the towers are added before being returned by
      # `read_var`.
      self.assertEqual(6.0, self.evaluate(dist.read_var(tower_local_var)))

  @test_util.run_in_graph_and_eager_modes(config=config)
  def testAssignTowerLocalVarMeanAggregation(self):
    """MEAN aggregation: reads and assigns are value-preserving."""
    self._skip_eager_if_gpus_less_than(1)
    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      return v_sum
    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])
    with dist.scope():
      tower_local_var = dist.call_for_each_tower(model_fn,
                                                 run_concurrently=False)
      # assertIsInstance gives a clearer failure message than
      # assertTrue(isinstance(...)).
      self.assertIsInstance(tower_local_var, values.TowerLocalVariable)
      self.evaluate(variables.global_variables_initializer())
      # Each tower has a value of 1.0 assigned to it in tower context.
      # When we read the value using `read_var` we should see the MEAN of values
      # on all towers which is the value assigned in tower context.
      self.assertEqual(1.0, self.evaluate(dist.read_var(tower_local_var)))
      tlv_ops = tower_local_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the tower local var we should get the MEAN of all values
      # which is equal to the value assigned.
      self.assertEqual(6.0, self.evaluate(dist.read_var(tower_local_var)))
class MockModel(object):
  """Minimal stand-in model: one or two scalar variables combined linearly."""

  def __init__(self, two_variables=False):
    # First variable always exists; the second is optional.
    self.variables = [variable_scope.variable(1.25, name="dummy_var1")]
    if two_variables:
      self.variables.append(variable_scope.variable(2.0, name="dummy_var2"))

  def __call__(self, factor=2):
    """Return factor * var1 (+ var2 when present)."""
    result = factor * self.variables[0]
    if len(self.variables) > 1:
      result = result + self.variables[1]
    return result
class MirroredStrategyDefunTest(test.TestCase):
  """Tests that defun-wrapped functions work under MirroredStrategy.

  Verifies both the computed per-device results and that each
  device-specialized trace of a defun captured the expected variables.
  """
  def _skip_eager_if_gpus_less_than(self, num_gpus):
    # Eager variants of these tests require a real GPU; graph mode can rely
    # on soft placement instead.
    if context.num_gpus() < num_gpus and context.executing_eagerly():
      self.skipTest("Not enough GPUs available for this test in eager mode.")
  def _call_and_check(self, model_fn, inputs, expected_result, defuns,
                      two_variables=False):
    # Run `model_fn` on CPU+GPU towers, compare per-device results against
    # `expected_result`, then check each defun's traced graph variables.
    cpu_dev = device_util.canonicalize("CPU:0")
    gpu_dev = device_util.canonicalize("GPU:0")
    devices = [cpu_dev, gpu_dev]
    dist = mirrored_strategy.MirroredStrategy(devices)
    with dist.scope():
      mock_model = MockModel(two_variables)
      self.evaluate(variables.global_variables_initializer())
      result = dist.call_for_each_tower(model_fn, mock_model, *inputs,
                                        run_concurrently=False)
      for device in devices:
        device_result = values.select_device(device, result)
        device_expected_result = values.select_device(device, expected_result)
        self.assertAllClose(device_expected_result,
                            self.evaluate(device_result))
      for defun in defuns:
        # PolymorphicFunctions are specialized to the current device stack, so
        # call_for_each has one trace per device. To check that the expected set
        # of variables was accessed on each trace, we first retrieve each
        # device-specific graph function.
        per_device_graph_functions = dist.call_for_each_tower(
            defun.get_concrete_function,
            mock_model, *inputs, run_concurrently=False)
        for device in devices:
          graph_function = per_device_graph_functions.get(device=device)
          self.assertEqual(set(mock_model.variables),
                           set(graph_function.graph.variables))
  @test_util.run_in_graph_and_eager_modes()
  def testVariableInDefun(self):
    # A single defun reading one variable: 2 * 1.25 = 2.5.
    self._skip_eager_if_gpus_less_than(1)
    @function.defun
    def times_two(mock_model):
      return mock_model()
    def model_fn(mock_model):
      return times_two(mock_model)
    self._call_and_check(model_fn, [], 2.5, [times_two])
  @test_util.run_in_graph_and_eager_modes()
  def testVariableInNestedDefun(self):
    # Nested defuns: 2 * 1.25 + 1 = 3.5.
    self._skip_eager_if_gpus_less_than(1)
    @function.defun
    def times_two(mock_model):
      return mock_model()
    @function.defun
    def two_x_plus_one(mock_model):
      return times_two(mock_model) + 1
    def model_fn(mock_model):
      return two_x_plus_one(mock_model)
    self._call_and_check(model_fn, [], 3.5, [times_two, two_x_plus_one])
  @test_util.run_in_graph_and_eager_modes()
  def testTwoVariablesInNestedDefun(self):
    # Two variables: 2 * 1.25 + 2.0 + 1 = 5.5.
    self._skip_eager_if_gpus_less_than(1)
    @function.defun
    def fn1(mock_model):
      return mock_model()
    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1
    def model_fn(mock_model):
      return fn2(mock_model)
    self._call_and_check(model_fn, [], 5.5, [fn1, fn2], two_variables=True)
  @test_util.run_in_graph_and_eager_modes()
  def testGradientTapeOverNestedDefuns(self):
    # d(2*v1 + v2 + 1)/dv1 = 2, d/dv2 = 1.
    self._skip_eager_if_gpus_less_than(1)
    @function.defun
    def fn1(mock_model):
      return mock_model()
    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1
    def model_fn(mock_model):
      with backprop.GradientTape(persistent=True) as gtape:
        result = fn2(mock_model)
      grads = gtape.gradient(result,
                             [v.get() for v in mock_model.variables])
      return grads
    self._call_and_check(model_fn, [], [2.0, 1.0], [fn1, fn2],
                         two_variables=True)
  @test_util.run_in_graph_and_eager_modes()
  def testPassPerDevice(self):
    # Per-device inputs: each device multiplies by its own factor.
    self._skip_eager_if_gpus_less_than(1)
    @function.defun
    def fn1(mock_model, factor):
      return mock_model(factor)
    factors = values.PerDevice({"CPU:0": 5.0, "GPU:0": 3.0})
    expected_result = values.PerDevice({"CPU:0": 5.0 * 1.25,
                                        "GPU:0": 3.0 * 1.25})
    self._call_and_check(fn1, [factors], expected_result, [fn1])
class MultiWorkerMirroredStrategyTest(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """Runs MirroredStrategy smoke tests against a 2-worker cluster."""
  def _get_distribution_strategy(self):
    # Build a strategy configured with a two-worker cluster spec; GPU count
    # comes from the local environment.
    cluster_spec = server_lib.ClusterSpec({
        "worker": ["/job:worker/task:0", "/job:worker/task:1"]
    })
    strategy = mirrored_strategy.MirroredStrategy(num_gpus=context.num_gpus())
    strategy.configure(cluster_spec=cluster_spec)
    return strategy
  def testMinimizeLossGraph(self):
    """Loss should decrease when minimizing across the multi-worker cluster."""
    self._test_minimize_loss_graph(self._get_distribution_strategy(),
                                   learning_rate=0.05)
class MultiWorkerMirroredStrategyTestWithChief(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """Same multi-worker smoke test, but with a dedicated chief task."""
  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 2 workers and 1 chief."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=2, num_ps=0, has_chief=True)
    # Sessions in these tests connect through the chief's gRPC target.
    cls._default_target = "grpc://" + cls._cluster_spec["chief"][0]
  def testMinimizeLossGraph(self):
    """Loss should decrease when minimizing via the chief-led cluster."""
    strategy = mirrored_strategy.MirroredStrategy(
        num_gpus_per_worker=context.num_gpus())
    strategy.configure(cluster_spec=self._cluster_spec)
    self._test_minimize_loss_graph(strategy, learning_rate=0.05)
# Standard TensorFlow test entry point: discover and run all tests above.
if __name__ == "__main__":
  test.main()
|
|
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """Return the P2P listen port for node *n*, offset by PID to avoid
    clashes between concurrently running test processes."""
    offset = os.getpid() % 999
    return 11000 + n + offset
def rpc_port(n):
    """Return the RPC listen port for node *n*, offset by PID to avoid
    clashes between concurrently running test processes."""
    offset = os.getpid() % 999
    return 12000 + n + offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    # Round-trip through float + JSON and check the satoshi count survives.
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        first = heights[0]
        if all(h == first for h in heights):
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        # Compare every other node's mempool against the first node's.
        reference = set(rpc_connections[0].getrawmempool())
        matched = 1
        for conn in rpc_connections[1:]:
            if set(conn.getrawmempool()) == reference:
                matched += 1
        if matched == len(rpc_connections):
            break
        time.sleep(wait)
# Map of node index -> running bitcoind subprocess.Popen handle.
# Shared by the start/stop/wait helpers below.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create (if needed) the datadir for node *n* under *dirname* and write
    a regtest bitcoin.conf with per-process ports. Returns the datadir path."""
    node_dir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(node_dir):
        os.makedirs(node_dir)
    conf_path = os.path.join(node_dir, "bitcoin.conf")
    with open(conf_path, 'w') as conf:
        conf.write("regtest=1\n")
        conf.write("rpcuser=rt\n")
        conf.write("rpcpassword=rt\n")
        conf.write("port=" + str(p2p_port(n)) + "\n")
        conf.write("rpcport=" + str(rpc_port(n)) + "\n")
    return node_dir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.

    On first run this builds the chain in ./cache/node0..3 (slow); later
    runs just copy the cached datadirs into test_dir.
    """
    # Rebuild the cache only if any of the four cached datadirs is missing.
    if (not os.path.isdir(os.path.join("cache","node0"))
        or not os.path.isdir(os.path.join("cache","node1"))
        or not os.path.isdir(os.path.join("cache","node2"))
        or not os.path.isdir(os.path.join("cache","node3"))):
        #find and delete old cache directories if any exist
        for i in range(4):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))
        devnull = open(os.devnull, "w")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Later nodes connect to node 0 so they share one network.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
            # -rpcwait blocks until the daemon is ready to serve RPCs.
            subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        # Remove per-run state so the cache copies are clean.
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir,  "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_id in range(num_nodes):
        initialize_datadir(test_dir, node_id)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it

    i          - node index (selects datadir and ports)
    dirname    - parent directory holding node<i> datadirs
    extra_args - additional bitcoind command-line arguments
    rpchost    - optional "host[:port]" to connect RPC through
    timewait   - optional RPC timeout in seconds for the returned proxy
    binary     - path to bitcoind; defaults to $BITCOIND or "bitcoind"
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BITCOIND", "bitcoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    # Track the process so stop_node/wait_bitcoinds can reap it later.
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open(os.devnull, "w")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
    # -rpcwait blocks until the daemon is ready to serve RPC requests.
    subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost)  +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling bitcoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bitcoinds, return RPC connections to them
    """
    # Normalize per-node option lists; None means "use defaults" everywhere.
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    nodes = []
    for node_id in range(num_nodes):
        nodes.append(start_node(node_id, dirname, extra_args[node_id],
                                rpchost, binary=binary[node_id]))
    return nodes
def log_filename(dirname, n_node, logname):
    """Return the path of *logname* inside node *n_node*'s regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Ask node *i* to shut down over RPC and reap its process handle."""
    node.stop()
    process = bitcoind_processes[i]
    process.wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Request shutdown of every node, then empty the list in place."""
    for connection in nodes:
        connection.stop()
    del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
    """Set the mock time of every node to *t* (unix timestamp)."""
    for connection in nodes:
        connection.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit, then forget their handles.
    for process in bitcoind_processes.values():
        process.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Connect *from_connection* to node *node_num* and wait for handshake."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    handshaking = True
    while handshaking:
        peers = from_connection.getpeerinfo()
        handshaking = any(peer['version'] == 0 for peer in peers)
        if handshaking:
            time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* to each other in both directions."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    total_in = Decimal("0.00000000")
    inputs = []
    # Keep taking outputs until we have covered the target amount.
    while candidates and total_in < amount_needed:
        txout = candidates.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"],
                       "vout": txout["vout"],
                       "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent*2:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[extra_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output

    The output of a just-broadcast (unconfirmed) transaction has zero
    coin-age, so spending it immediately yields a zero-priority tx.
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Gather enough for the payment plus both transactions' fees.
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    recipient = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    txid, txhex = send_zeropri_transaction(sender, recipient, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    recipient = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (total_in, inputs) = gather_inputs(sender, amount+fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[recipient.getnewaddress()] = float(amount)
    raw_tx = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw_tx)
    txid = sender.sendrawtransaction(signed["hex"], True)
    return (txid, signed["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 != thing2:
        message = "%s != %s"%(str(thing1),str(thing2))
        raise AssertionError(message)
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 <= thing2:
        message = "%s <= %s"%(str(thing1),str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Call fun(*args, **kwds) and require that it raises *exc*.

    Raises AssertionError if fun raises a different exception or none at all.
    """
    try:
        fun(*args, **kwds)
    except exc:
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    raise AssertionError("No exception raised")
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HP 3PAR Storage array.
This driver requires 3.1.2 MU2 firmware on the 3PAR array.
You will need to install the python hp3parclient.
sudo pip install hp3parclient
Set the following in the cinder.conf file to enable the
3PAR iSCSI Driver along with the required flags:
volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
"""
import sys
from hp3parclient import exceptions as hpexceptions
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
from cinder.volume.drivers.san import san
# Driver version; history is documented in the class docstring below.
VERSION = 1.1
LOG = logging.getLogger(__name__)
# Default iSCSI target port, used when an hp3par_iscsi_ips entry omits one.
DEFAULT_ISCSI_PORT = 3260
class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
"""OpenStack iSCSI driver to enable 3PAR storage array.
Version history:
1.0 - Initial driver
1.1 - QoS, extend volume, multiple iscsi ports, remove domain,
session changes, faster clone, requires 3.1.2 MU2 firmware.
"""
    def __init__(self, *args, **kwargs):
        """Initialize the driver and register 3PAR and SAN config options."""
        super(HP3PARISCSIDriver, self).__init__(*args, **kwargs)
        # Created lazily in do_setup(); stays None until then.
        self.common = None
        self.configuration.append_config_values(hpcommon.hp3par_opts)
        self.configuration.append_config_values(san.san_opts)
def _init_common(self):
return hpcommon.HP3PARCommon(self.configuration)
def _check_flags(self):
"""Sanity check to ensure we have required options set."""
required_flags = ['hp3par_api_url', 'hp3par_username',
'hp3par_password', 'san_ip', 'san_login',
'san_password']
self.common.check_flags(self.configuration, required_flags)
@utils.synchronized('3par', external=True)
def get_volume_stats(self, refresh):
self.common.client_login()
stats = self.common.get_volume_stats(refresh)
stats['storage_protocol'] = 'iSCSI'
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = backend_name or self.__class__.__name__
self.common.client_logout()
return stats
    def do_setup(self, context):
        """Validate config and build the map of usable iSCSI target IPs.

        Populates self.iscsi_ips as {ip: {'ip_port', 'iqn', 'nsp'}} from the
        intersection of configured addresses and ports reported by the array.
        Raises InvalidInput if no configured address is a valid iSCSI port.
        """
        self.common = self._init_common()
        self._check_flags()
        # map iscsi_ip -> ip_port
        #              -> iqn
        #              -> nsp
        self.iscsi_ips = {}
        # Staging dict of configured-but-not-yet-validated addresses.
        temp_iscsi_ip = {}
        # use the 3PAR ip_addr list for iSCSI configuration
        if len(self.configuration.hp3par_iscsi_ips) > 0:
            # add port values to ip_addr, if necessary
            for ip_addr in self.configuration.hp3par_iscsi_ips:
                ip = ip_addr.split(':')
                if len(ip) == 1:
                    temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT}
                elif len(ip) == 2:
                    temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
                else:
                    msg = _("Invalid IP address format '%s'") % ip_addr
                    LOG.warn(msg)
        # add the single value iscsi_ip_address option to the IP dictionary.
        # This way we can see if it's a valid iSCSI IP. If it's not valid,
        # we won't use it and won't bother to report it, see below
        if (self.configuration.iscsi_ip_address not in temp_iscsi_ip):
            ip = self.configuration.iscsi_ip_address
            ip_port = self.configuration.iscsi_port
            temp_iscsi_ip[ip] = {'ip_port': ip_port}
        # get all the valid iSCSI ports from 3PAR
        # when found, add the valid iSCSI ip, ip port, iqn and nsp
        # to the iSCSI IP dictionary
        # ...this will also make sure ssh works.
        iscsi_ports = self.common.get_ports()['iSCSI']
        for (ip, iscsi_info) in iscsi_ports.iteritems():
            if ip in temp_iscsi_ip:
                ip_port = temp_iscsi_ip[ip]['ip_port']
                self.iscsi_ips[ip] = {'ip_port': ip_port,
                                      'nsp': iscsi_info['nsp'],
                                      'iqn': iscsi_info['iqn']
                                      }
                del temp_iscsi_ip[ip]
        # if the single value iscsi_ip_address option is still in the
        # temp dictionary it's because it defaults to $my_ip which doesn't
        # make sense in this context. So, if present, remove it and move on.
        if (self.configuration.iscsi_ip_address in temp_iscsi_ip):
            del temp_iscsi_ip[self.configuration.iscsi_ip_address]
        # lets see if there are invalid iSCSI IPs left in the temp dict
        if len(temp_iscsi_ip) > 0:
            msg = _("Found invalid iSCSI IP address(s) in configuration "
                    "option(s) hp3par_iscsi_ips or iscsi_ip_address '%s.'") % \
                (", ".join(temp_iscsi_ip))
            LOG.warn(msg)
        if not len(self.iscsi_ips) > 0:
            msg = _('At least one valid iSCSI IP address must be set.')
            raise exception.InvalidInput(reason=(msg))
        self.common.do_setup(context)
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        # Only configuration flags are validated here; array connectivity is
        # exercised during do_setup().
        self._check_flags()
@utils.synchronized('3par', external=True)
def create_volume(self, volume):
self.common.client_login()
metadata = self.common.create_volume(volume)
self.common.client_logout()
return {'metadata': metadata}
@utils.synchronized('3par', external=True)
def create_cloned_volume(self, volume, src_vref):
"""Clone an existing volume."""
self.common.client_login()
new_vol = self.common.create_cloned_volume(volume, src_vref)
self.common.client_logout()
return {'metadata': new_vol}
@utils.synchronized('3par', external=True)
def delete_volume(self, volume):
self.common.client_login()
self.common.delete_volume(volume)
self.common.client_logout()
@utils.synchronized('3par', external=True)
def create_volume_from_snapshot(self, volume, snapshot):
"""
Creates a volume from a snapshot.
TODO: support using the size from the user.
"""
self.common.client_login()
metadata = self.common.create_volume_from_snapshot(volume, snapshot)
self.common.client_logout()
return {'metadata': metadata}
@utils.synchronized('3par', external=True)
def create_snapshot(self, snapshot):
self.common.client_login()
self.common.create_snapshot(snapshot)
self.common.client_logout()
@utils.synchronized('3par', external=True)
def delete_snapshot(self, snapshot):
self.common.client_login()
self.common.delete_snapshot(snapshot)
self.common.client_logout()
@utils.synchronized('3par', external=True)
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be
used from that host.
This driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
Example return value:
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_protal': '127.0.0.1:3260',
'volume_id': 1,
}
}
Steps to export a volume on 3PAR
* Get the 3PAR iSCSI iqn
* Create a host on the 3par
* create vlun on the 3par
"""
self.common.client_login()
# we have to make sure we have a host
host = self._create_host(volume, connector)
# now that we have a host, create the VLUN
vlun = self.common.create_vlun(volume, host)
self.common.client_logout()
iscsi_ip = self._get_iscsi_ip(host['name'])
iscsi_ip_port = self.iscsi_ips[iscsi_ip]['ip_port']
iscsi_target_iqn = self.iscsi_ips[iscsi_ip]['iqn']
info = {'driver_volume_type': 'iscsi',
'data': {'target_portal': "%s:%s" %
(iscsi_ip, iscsi_ip_port),
'target_iqn': iscsi_target_iqn,
'target_lun': vlun['lun'],
'target_discovered': True
}
}
return info
@utils.synchronized('3par', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
self.common.client_login()
self.common.terminate_connection(volume,
connector['host'],
connector['initiator'])
self.common.client_logout()
def _create_3par_iscsi_host(self, hostname, iscsi_iqn, domain, persona_id):
"""Create a 3PAR host.
Create a 3PAR host, if there is already a host on the 3par using
the same iqn but with a different hostname, return the hostname
used by 3PAR.
"""
cmd = 'createhost -iscsi -persona %s -domain %s %s %s' % \
(persona_id, domain, hostname, iscsi_iqn)
out = self.common._cli_run(cmd, None)
if out and len(out) > 1:
return self.common.parse_create_host_error(hostname, out)
return hostname
def _modify_3par_iscsi_host(self, hostname, iscsi_iqn):
# when using -add, you can not send the persona or domain options
self.common._cli_run('createhost -iscsi -add %s %s'
% (hostname, iscsi_iqn), None)
def _create_host(self, volume, connector):
"""Creates or modifies existing 3PAR host."""
# make sure we don't have the host already
host = None
hostname = self.common._safe_hostname(connector['host'])
cpg = self.common.get_cpg(volume, allowSnap=True)
domain = self.common.get_domain(cpg)
try:
host = self.common._get_3par_host(hostname)
if not host['iSCSIPaths']:
self._modify_3par_iscsi_host(hostname, connector['initiator'])
host = self.common._get_3par_host(hostname)
except hpexceptions.HTTPNotFound:
# get persona from the volume type extra specs
persona_id = self.common.get_persona_type(volume)
# host doesn't exist, we have to create it
hostname = self._create_3par_iscsi_host(hostname,
connector['initiator'],
domain,
persona_id)
host = self.common._get_3par_host(hostname)
return host
@utils.synchronized('3par', external=True)
def create_export(self, context, volume):
pass
@utils.synchronized('3par', external=True)
def ensure_export(self, context, volume):
pass
@utils.synchronized('3par', external=True)
def remove_export(self, context, volume):
pass
def _get_iscsi_ip(self, hostname):
"""Get an iSCSI IP address to use.
Steps to determine which IP address to use.
* If only one IP address, return it
* If there is an active vlun, return the IP associated with it
* Return IP with fewest active vluns
"""
if len(self.iscsi_ips) == 1:
return self.iscsi_ips.keys()[0]
# if we currently have an active port, use it
nsp = self._get_active_nsp(hostname)
if nsp is None:
# no active vlun, find least busy port
nsp = self._get_least_used_nsp(self._get_iscsi_nsps())
if nsp is None:
msg = _("Least busy iSCSI port not found, "
"using first iSCSI port in list.")
LOG.warn(msg)
return self.iscsi_ips.keys()[0]
return self._get_ip_using_nsp(nsp)
def _get_iscsi_nsps(self):
"""Return the list of candidate nsps."""
nsps = []
for value in self.iscsi_ips.values():
nsps.append(value['nsp'])
return nsps
def _get_ip_using_nsp(self, nsp):
"""Return IP assiciated with given nsp."""
for (key, value) in self.iscsi_ips.items():
if value['nsp'] == nsp:
return key
def _get_active_nsp(self, hostname):
"""Return the active nsp, if one exists, for the given host."""
result = self.common._cli_run('showvlun -a -host %s' % hostname, None)
if result:
# first line is header
result = result[1:]
for line in result:
info = line.split(",")
if info and len(info) > 4:
return info[4]
def _get_least_used_nsp(self, nspss):
""""Return the nsp that has the fewest active vluns."""
# return only the nsp (node:server:port)
result = self.common._cli_run('showvlun -a -showcols Port', None)
# count the number of nsps (there is 1 for each active vlun)
nsp_counts = {}
for nsp in nspss:
# initialize counts to zero
nsp_counts[nsp] = 0
current_least_used_nsp = None
if result:
# first line is header
result = result[1:]
for line in result:
nsp = line.strip()
if nsp in nsp_counts:
nsp_counts[nsp] = nsp_counts[nsp] + 1
# identify key (nsp) of least used nsp
current_smallest_count = sys.maxint
for (nsp, count) in nsp_counts.iteritems():
if count < current_smallest_count:
current_least_used_nsp = nsp
current_smallest_count = count
return current_least_used_nsp
def extend_volume(self, volume, new_size):
self.common.extend_volume(volume, new_size)
|
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
from grako.model import NodeWalker
try:
import pygraphviz as pgv
except:
raise
__all__ = ['draw']
def draw(filename, grammar):
    """Walk *grammar* and write its Graphviz rendering to *filename*."""
    walker = GraphvizWalker()
    walker.walk(grammar)
    walker.draw(filename)
class GraphvizWalker(NodeWalker):
    """Walk a grako grammar model and build a Graphviz diagram of it.

    Each walk_* method returns an (entry, exit) node pair for the model
    element it rendered (or None for elements that draw nothing); callers
    wire those pairs together with edges. A stack of (sub)graphs tracks
    the current clustering context.
    """

    def __init__(self):
        super(GraphvizWalker, self).__init__()
        # Left-to-right layout; subgraphs are packed as clusters.
        self.top_graph = pgv.AGraph(directed=True,
                                    rankdir='LR',
                                    packMode='clust',
                                    splines='true'
                                    )
        # The current (sub)graph is always the top of this stack.
        self.stack = [self.top_graph]
        # Counter used to generate unique node/subgraph ids.
        self.node_count = 0

    @property
    def graph(self):
        # Current working graph (innermost pushed subgraph).
        return self.stack[-1]

    def draw(self, filename):
        """Lay out with dot and write the image to *filename*."""
        self.graph.layout(prog='dot')
        # WARNING: neato generated graphics hang my GPU
        # self.graph.layout(prog='neato')
        self.graph.draw(filename)

    def push_graph(self, name=None, **attr):
        """Enter a new subgraph context; auto-name it if none is given."""
        if name is None:
            self.node_count += 1
            name = 'g%d' % self.node_count
        self.stack.append(self.graph.add_subgraph(name, **attr))
        return self.graph

    def pop_graph(self):
        """Leave the current subgraph context."""
        self.stack.pop()
        pass

    def node(self, name, id=None, **attr):
        """Add (or fetch) a node labeled *name*.

        With an explicit *id*, an existing node with that id is returned
        unchanged; otherwise a fresh unique id is generated.
        """
        if id is None:
            self.node_count += 1
            id = 'n%d' % self.node_count
        else:
            try:
                return self.graph.get_node(id)
            except KeyError:
                pass
        self.graph.add_node(id, **attr)
        n = self.graph.get_node(id)
        n.attr['label'] = name
        # n.attr['shape'] = 'circle'
        return n

    def tnode(self, name, **attr):
        # Terminal node; currently identical to a plain node.
        return self.node(name, **attr)

    def dot(self):
        """Create an invisible junction point used to join edges."""
        n = self.node('')
        n.attr['shape'] = 'point'
        n.attr['size'] = 0.0000000001
        n.attr['label'] = ''
        return n

    def start_node(self):
        return self.dot()

    def ref_node(self, name):
        """Box-shaped node for a reference to another rule."""
        n = self.node(name)
        n.attr['shape'] = 'box'
        return n

    def rule_node(self, name, **attr):
        """Parallelogram-shaped node marking a rule's entry."""
        n = self.node(name, **attr)
        n.attr['shape'] = 'parallelogram'
        return n

    def end_node(self):
        """Small point node marking a rule's exit."""
        n = self.node('')
        n.attr['shape'] = 'point'
        n.attr['width'] = 0.1
        return n

    def edge(self, s, e, **attr):
        """Add a headless edge from *s* to *e* and return it."""
        self.graph.add_edge(s, e, **attr)
        edge = self.graph.get_edge(s, e)
        # edge.attr['arrowhead'] = 'normal'
        edge.attr['arrowhead'] = 'none'
        return edge

    def redge(self, s, e):
        # Reversed-direction edge (drawn back-to-front).
        edge = self.edge(s, e)
        edge.attr['dir'] = 'back'
        return edge

    def zedge(self, s, e):
        # Near-zero-length edge to keep the two nodes adjacent.
        edge = self.edge(s, e, len=0.000001)
        return edge

    def nedge(self, s, e):
        # Invisible edge used purely to influence layout.
        return self.edge(s, e, style='invisible', dir='none')

    def path(self, p):
        self.graph.add_path(p)

    def subgraph(self, name, bunch):
        # NOTE(review): `bunch` is accepted but ignored; only the named,
        # empty subgraph is created. Confirm whether nodes should be added.
        self.top_graph.add_subgraph(name)

    def concat(*args):
        # NOTE(review): missing `self` parameter — if called as
        # self.concat(...), the instance itself would be chained into the
        # result. Appears unused in this file; confirm before relying on it.
        return list(itertools.chain(*args))

    def _walk_decorator(self, d):
        # Decorator-style model elements wrap a single `exp`; draw that.
        return self.walk(d.exp)

    def _walk__Decorator(self, d):
        # Debug trace for generic decorator dispatch.
        print('WALKING', type(d))
        return self._walk_decorator(d)

    def walk_Grammar(self, g):
        """Draw all rules; return the whole grammar's (entry, exit) pair."""
        self.push_graph(g.name + '0')
        try:
            vrules = [self.walk(r) for r in reversed(g.rules)]
        finally:
            self.pop_graph()

        self.push_graph(g.name + '1')
        try:
            # link all rule starting nodes with invisible edges
            starts = [self.node(r.name, id=r.name) for r in g.rules]
            for n1, n2 in zip(starts, starts[1:]):
                # self.nedge(n1, n2)
                pass
        finally:
            self.pop_graph()
        s, t = vrules[0][0], vrules[-1][1]
        return (s, t)

    def walk_Rule(self, r):
        """Draw one rule inside its own subgraph."""
        self.push_graph(r.name)
        try:
            i, e = self._walk_decorator(r)
            s = self.rule_node(r.name, id=r.name)
            self.edge(s, i)
            t = self.end_node()
            self.edge(e, t)
            return (s, t)
        finally:
            self.pop_graph()

    def walk_BasedRule(self, r):
        return self.walk_Rule(r)

    def walk_RuleRef(self, rr):
        n = self.ref_node(rr.name)
        return (n, n)

    def walk_Special(self, s):
        n = self.node(s.special)
        return (n, n)

    def walk_Override(self, o):
        return self._walk_decorator(o)

    def walk_Named(self, n):
        return self._walk_decorator(n)

    def walk_NamedList(self, n):
        return self._walk_decorator(n)

    def walk_Cut(self, c):
        # Cuts draw nothing; walk_Sequence filters out the None.
        # c = self.node('>>')
        # return (c, c)
        return None

    def walk_Optional(self, o):
        # Bypass edge between two junctions makes the element skippable.
        i, e = self._walk_decorator(o)
        ni = self.dot()
        ne = self.dot()
        self.zedge(ni, i)
        self.edge(ni, ne)
        self.zedge(e, ne)
        return (ni, ne)

    def walk_Closure(self, r):
        # Zero-or-more: a loop back through a single junction node.
        self.push_graph(rankdir='TB')
        try:
            i, e = self._walk_decorator(r)
            ni = self.dot()
            self.edge(ni, i)
            self.edge(e, ni)
            return (ni, ni)
        finally:
            self.pop_graph()

    def walk_PositiveClosure(self, r):
        # One-or-more: loop exit back to entry.
        i, e = self._walk_decorator(r)
        if i == e:
            self.redge(e, i)
        else:
            self.edge(e, i)
        return (i, e)

    def walk_Group(self, g):
        return self._walk_decorator(g)

    def walk_Choice(self, c):
        # Fan out from one junction into each option and back into another.
        vopt = [self.walk(o) for o in c.options]
        ni = self.dot()
        ne = self.dot()
        for i, e in vopt:
            self.edge(ni, i)
            self.edge(e, ne)
        return (ni, ne)

    def walk_Sequence(self, s):
        # Chain consecutive elements; elements that drew nothing are None.
        vseq = [self.walk(x) for x in s.sequence]
        vseq = [x for x in vseq if x is not None]
        i, _ = vseq[0]
        _, e = vseq[-1]
        if i != e:
            bunch = zip([a for _x, a in vseq[:-1]],
                        [b for b, _y in vseq[1:]])
            for n, n1 in bunch:
                self.edge(n, n1)
        return (i, e)

    def walk_Lookahead(self, l):
        i, e = self._walk_decorator(l)
        n = self.node('&')
        self.edge(n, e)
        return (n, e)

    def walk_LookaheadNot(self, l):
        i, e = self._walk_decorator(l)
        n = self.node('!')
        self.edge(n, e)
        return (n, e)

    def walk_RuleInclude(self, l):
        i, e = self._walk_decorator(l)
        n = self.node('>')
        self.edge(n, e)
        return (n, e)

    def walk_Pattern(self, p):
        n = self.tnode(p.pattern)
        return (n, n)

    def walk_Token(self, t):
        n = self.tnode(t.token)
        return (n, n)

    def walk_Void(self, v):
        n = self.dot()
        return (n, n)

    def walk_EOF(self, v):
        # EOF draws nothing; walk_Sequence filters out the None.
        # n = self.node('$')
        # return (n, n)
        return None
|
|
"""
This module implements the built-in class and method decorators and their
handling classes.
"""
# Standard library imports
import functools
import inspect
# Local imports
from uplink import arguments, helpers, hooks, interfaces, utils
from uplink.compat import abc
__all__ = [
"headers",
"params",
"form_url_encoded",
"multipart",
"json",
"timeout",
"args",
"response_handler",
"error_handler",
"inject",
]
class MethodAnnotationHandlerBuilder(interfaces.AnnotationHandlerBuilder):
    """Collects class- and method-level annotations for a consumer method."""

    def __init__(self):
        # Class-level annotations are tracked separately so they can be
        # placed ahead of method-level ones when the handler is built.
        self._class_annotations = []
        self._method_annotations = []

    def add_annotation(self, annotation, *args_, **kwargs):
        if kwargs.get("is_class", False):
            bucket = self._class_annotations
        else:
            bucket = self._method_annotations
        bucket.append(annotation)
        super(MethodAnnotationHandlerBuilder, self).add_annotation(annotation)
        return annotation

    def copy(self):
        """Return a builder with independent copies of both lists."""
        duplicate = MethodAnnotationHandlerBuilder()
        duplicate._class_annotations = list(self._class_annotations)
        duplicate._method_annotations = list(self._method_annotations)
        return duplicate

    def build(self):
        # Class annotations are applied before method annotations.
        return MethodAnnotationHandler(
            self._class_annotations + self._method_annotations
        )
class MethodAnnotationHandler(interfaces.AnnotationHandler):
    """Applies an ordered sequence of annotations to request builders."""

    def __init__(self, method_annotations):
        self._method_annotations = list(method_annotations)

    @property
    def annotations(self):
        return iter(self._method_annotations)

    def handle_builder(self, request_builder):
        # Give each annotation a chance to mutate the outgoing request.
        for method_annotation in self._method_annotations:
            method_annotation.modify_request(request_builder)
# TODO: Only decorate consumers
class MethodAnnotation(interfaces.Annotation):
    """Base class for decorators that modify consumer request methods.

    An instance can decorate either a single request-definition builder
    (a consumer method) or a whole consumer class, in which case it is
    fanned out to every method whose HTTP verb it supports.
    """

    # Subclasses restrict applicable HTTP methods by setting one of these
    # (see supports_http_method; the blacklist wins when both are set).
    _http_method_blacklist = None
    _http_method_whitelist = None

    @staticmethod
    def _is_consumer_class(c):
        return utils.is_subclass(c, interfaces.Consumer)

    @classmethod
    def supports_http_method(cls, method):
        """Return True if this annotation applies to the given HTTP verb."""
        method = method.upper()
        if cls._http_method_blacklist is not None:
            return method not in cls._http_method_blacklist
        if cls._http_method_whitelist is not None:
            return method in cls._http_method_whitelist
        return True

    @classmethod
    def _is_relevant_for_builder(cls, builder):
        # `builder` is a (name, request_definition_builder) pair.
        return cls.supports_http_method(builder[1].method)

    @classmethod
    def _is_static_call(cls, *args_, **kwargs):
        # Detects bare-decorator usage on a consumer class: exactly one
        # positional argument that is a Consumer subclass, nothing else.
        if super(MethodAnnotation, cls)._is_static_call(*args_, **kwargs):
            return True
        try:
            is_consumer_class = cls._is_consumer_class(args_[0])
        except IndexError:
            return False
        else:
            return is_consumer_class and not (kwargs or args_[1:])

    def _modify_request_definition(self, builder, kwargs):
        builder.method_handler_builder.add_annotation(self, **kwargs)

    def __call__(self, class_or_builder, **kwargs):
        """Apply this annotation to a consumer class or method builder."""
        if self._is_consumer_class(class_or_builder):
            builders = helpers.get_api_definitions(class_or_builder)
            # Fan out only to methods whose HTTP verb is supported.
            builders = filter(self._is_relevant_for_builder, builders)

            for name, b in builders:
                self(b, is_class=True)
                helpers.set_api_definition(class_or_builder, name, b)
        elif isinstance(class_or_builder, interfaces.RequestDefinitionBuilder):
            self._modify_request_definition(class_or_builder, kwargs)
        return class_or_builder

    def modify_request(self, request_builder):
        """Hook point for subclasses; the default is a no-op."""
        pass
class _BaseRequestProperties(MethodAnnotation):
    """Base for annotations that merge static key/value pairs into the
    request (e.g. headers, query params)."""

    # Key in request_builder.info to update (set by subclasses).
    _property_name = None
    # Separator for "key<delimiter>value" string forms (set by subclasses).
    _delimiter = None

    def __init__(self, arg, **kwargs):
        if isinstance(arg, list):
            # Each list element is a "key<delimiter>value" string.
            self._values = dict(self._split(a) for a in arg)
        else:
            # arg is mapping-like; kwargs add/override entries.
            self._values = dict(arg, **kwargs)

    def _split(self, arg):
        # Yields the stripped (key, value) parts of a delimited string.
        return map(str.strip, arg.split(self._delimiter))

    def modify_request(self, request_builder):
        """Merge the static values into the request's target property."""
        request_builder.info[self._property_name].update(self._values)
# noinspection PyPep8Naming
class headers(_BaseRequestProperties):
    """
    A decorator that adds static headers for API calls.

    .. code-block:: python

        @headers({"User-Agent": "Uplink-Sample-App"})
        @get("/user")
        def get_user(self):
            \"""Get the current user\"""

    When used as a class decorator, :py:class:`headers` applies to
    all consumer methods bound to the class:

    .. code-block:: python

        @headers({"Accept": "application/vnd.github.v3.full+json"})
        class GitHub(Consumer):
            ...

    :py:class:`headers` takes the same arguments as :py:class:`dict`.

    Args:
        arg: A dict containing header values.
        **kwargs: More header values.
    """

    def __init__(self, arg=None, **kwargs):
        # A single "Name: value" string becomes a one-entry dict.
        if isinstance(arg, str):
            name, value = self._split(arg)
            arg = {name: value}
        super(headers, self).__init__(arg or {}, **kwargs)

    @property
    def _property_name(self):
        return "headers"

    @property
    def _delimiter(self):
        return ":"
# noinspection PyPep8Naming
class params(_BaseRequestProperties):
    """
    A decorator that adds static query parameters for API calls.

    .. code-block:: python

        @params({"sort": "created"})
        @get("/user")
        def get_user(self):
            \"""Get the current user\"""

    When used as a class decorator, :py:class:`params` applies to
    all consumer methods bound to the class:

    .. code-block:: python

        @params({"client_id": "my-app-client-id"})
        class GitHub(Consumer):
            ...

    :py:class:`params` takes the same arguments as :py:class:`dict`.

    Args:
        arg: A dict containing query parameters.
        **kwargs: More query parameters.
    """

    def __init__(self, arg=None, **kwargs):
        # "a=1&b=2" style strings are exploded into a list of pairs first.
        if isinstance(arg, str):
            arg = arg.split("&")
        super(params, self).__init__(arg or {}, **kwargs)

    @property
    def _delimiter(self):
        return "="

    @property
    def _property_name(self):
        return "params"
# noinspection PyPep8Naming
class form_url_encoded(MethodAnnotation):
    """
    URL-encodes the request body.

    Used on POST/PUT/PATCH request. It url-encodes the body of the
    message and sets the appropriate ``Content-Type`` header. Further,
    each field argument should be annotated with
    :py:class:`uplink.Field`.

    Example:
        .. code-block:: python

            @form_url_encoded
            @post("/users/edit")
            def update_user(self, first_name: Field, last_name: Field):
                \"""Update the current user.\"""
    """

    # Request bodies are not meaningful on GET.
    _http_method_blacklist = {"GET"}
    # Presumably enables bare-decorator (no-parentheses) usage — consumed
    # by the Annotation base class; confirm against interfaces.Annotation.
    _can_be_static = True

    # XXX: Let `requests` handle building urlencoded syntax.
    # def modify_request(self, request_builder):
    #     request_builder.info.headers(
    #         {"Content-Type": "application/x-www-form-urlencoded"}
    #     )
# noinspection PyPep8Naming
class multipart(MethodAnnotation):
    """
    Sends multipart form data.

    Multipart requests are commonly used to upload files to a server.
    Further, annotate each part argument with :py:class:`Part`.

    Example:
        .. code-block:: python

            @multipart
            @put("/user/photo")
            def update_user(self, photo: Part, description: Part):
                \"""Upload a user profile photo.\"""
    """

    # Request bodies are not meaningful on GET.
    _http_method_blacklist = {"GET"}
    # Presumably enables bare-decorator (no-parentheses) usage — consumed
    # by the Annotation base class; confirm against interfaces.Annotation.
    _can_be_static = True

    # XXX: Let `requests` handle building multipart syntax.
    # def modify_request(self, request_builder):
    #     request_builder.info.headers(
    #         {"Content-Type": "multipart/form-data"}
    #     )
# noinspection PyPep8Naming
class json(MethodAnnotation):
    """Use as a decorator to make JSON requests.

    You can annotate a method argument with :py:class:`uplink.Body`,
    which indicates that the argument's value should become the
    request's body. :py:class:`uplink.Body` has to be either a dict or a
    subclass of py:class:`abc.Mapping`.

    Example:
        .. code-block:: python

            @json
            @patch("/user")
            def update_user(self, **info: Body):
                \"""Update the current user.\"""

    You can alternatively use the :py:class:`uplink.Field` annotation to
    specify JSON fields separately, across multiple arguments:

    Example:
        .. code-block:: python

            @json
            @patch("/user")
            def update_user(self, name: Field, email: Field("e-mail")):
                \"""Update the current user.\"""

    Further, to set a nested field, you can specify the path of the
    target field with a tuple of strings as the first argument of
    :py:class:`uplink.Field`.

    Example:
        Consider a consumer method that sends a PATCH request with a JSON
        body of the following format:

        .. code-block:: json
            :emphasize-lines: 3

            {
                user: {
                    name: "<User's Name>"
                },
            }

        The tuple :py:obj:`("user", "name")` specifies the path to the
        highlighted inner field:

        .. code-block:: python
            :emphasize-lines: 5

            @json
            @patch("/user")
            def update_user(
                self,
                new_name: Field(("user", "name"))
            ):
                \"""Update the current user.\"""
    """

    # Request bodies are not meaningful on GET.
    _http_method_blacklist = {"GET"}
    # Presumably enables bare-decorator (no-parentheses) usage — confirm
    # against interfaces.Annotation.
    _can_be_static = True

    @staticmethod
    def _sequence_path_resolver(path, value, body):
        # Walk (and create) intermediate dicts down to the parent of the
        # target field, then assign the value at the final key.
        if not path:
            raise ValueError("Path sequence cannot be empty.")
        for name in path[:-1]:
            body = body.setdefault(name, {})
            if not isinstance(body, abc.Mapping):
                raise ValueError(
                    "Failed to set nested JSON attribute '%s': "
                    "parent field '%s' is not a JSON object." % (path, name)
                )
        body[path[-1]] = value

    def modify_request(self, request_builder):
        """Modifies JSON request."""
        request_builder.add_transaction_hook(self._hook)

    @classmethod
    def set_json_body(cls, request_builder):
        # Move the prepared form-style body ("data") into the "json" slot,
        # resolving tuple keys as nested field paths.
        old_body = request_builder.info.pop("data", {})
        if isinstance(old_body, abc.Mapping):
            body = request_builder.info.setdefault("json", {})
            for path in old_body:
                if isinstance(path, tuple):
                    cls._sequence_path_resolver(path, old_body[path], body)
                else:
                    body[path] = old_body[path]
        else:
            request_builder.info.setdefault("json", old_body)

    # Lazily created, class-shared hook instance (see _hook below).
    __hook = None

    @property
    def _hook(self):
        if self.__hook is None:
            self.__hook = hooks.RequestAuditor(self.set_json_body)
        return self.__hook
# noinspection PyPep8Naming
class timeout(MethodAnnotation):
    """
    Time to wait for a server response before giving up.

    When used on other decorators it specifies how long (in secs) a
    decorator should wait before giving up.

    Example:
        .. code-block:: python

            @timeout(60)
            @get("/user/posts")
            def get_posts(self):
                \"""Fetch all posts for the current users.\"""

    When used as a class decorator, :py:class:`timeout` applies to all
    consumer methods bound to the class.

    Args:
        seconds (int): An integer used to indicate how long should the
            request wait.
    """

    def __init__(self, seconds):
        self._seconds = seconds

    def modify_request(self, request_builder):
        """Set the outgoing request's timeout to the configured seconds."""
        request_builder.info["timeout"] = self._seconds
# noinspection PyPep8Naming
class args(MethodAnnotation):
    """
    Annotate method arguments for Python 2.7 compatibility.

    Arrange annotations in the same order as their corresponding
    function arguments.

    Example:
        .. code-block:: python

            @args(Path, Query)
            @get("/users/{username}")
            def get_user(self, username, visibility):
                \"""Get a specific user.\"""

    Use keyword args to target specific method parameters.

    Example:
        .. code-block:: python

            @args(visibility=Query)
            @get("/users/{username}")
            def get_user(self, username, visibility):
                \"""Get a specific user.\"""

    Args:
        *annotations: Any number of annotations.
        **more_annotations: More annotations, targeting specific method
            arguments.
    """

    def __init__(self, *annotations, **more_annotations):
        self._annotations = annotations
        self._more_annotations = more_annotations

    def __call__(self, obj):
        # When applied to a plain function (not yet a request definition),
        # attach the annotations directly to the function's argument
        # handler; otherwise defer to the MethodAnnotation protocol.
        if inspect.isfunction(obj):
            handler = arguments.ArgumentAnnotationHandlerBuilder.from_func(obj)
            self._helper(handler)
            return obj
        else:
            return super(args, self).__call__(obj)

    def _helper(self, builder):
        builder.set_annotations(self._annotations, **self._more_annotations)

    def modify_request_definition(self, request_definition_builder):
        """Modifies dynamic requests with given annotations"""
        self._helper(request_definition_builder.argument_handler_builder)
class _InjectableMethodAnnotation(MethodAnnotation):
    # Base for annotations that are themselves transaction hooks:
    # applying the annotation registers the instance on the builder.
    def modify_request(self, request_builder):
        request_builder.add_transaction_hook(self)
class _BaseHandlerAnnotation(_InjectableMethodAnnotation):
    # Supports both decorator forms:
    #   @annotation            -> func is the decorated callable
    #   @annotation(opt=...)   -> func is None; return a decorator that
    #                             re-invokes the class with the options.
    def __new__(cls, func=None, *args, **kwargs):
        if func is None:
            return lambda f: cls(f, *args, **kwargs)
        self = super(_BaseHandlerAnnotation, cls).__new__(cls)
        # Preserve the wrapped function's name/docstring on the instance.
        functools.update_wrapper(self, func)
        return self
# noinspection PyPep8Naming
class response_handler(_BaseHandlerAnnotation, hooks.ResponseHandler):
    """
    A decorator for creating custom response handlers.

    To register a function as a custom response handler, decorate the
    function with this class. The decorated function should accept a single
    positional argument, an HTTP response object:

    Example:
        .. code-block:: python

            @response_handler
            def raise_for_status(response):
                response.raise_for_status()
                return response

    Then, to apply custom response handling to a request method, simply
    decorate the method with the registered response handler:

    Example:
        .. code-block:: python

            @raise_for_status
            @get("/user/posts")
            def get_posts(self):
                \"""Fetch all posts for the current users.\"""

    To apply custom response handling on all request methods of a
    :py:class:`uplink.Consumer` subclass, simply decorate the class with
    the registered response handler:

    Example:
        .. code-block:: python

            @raise_for_status
            class GitHub(Consumer):
                ...

    Lastly, the decorator supports the optional argument
    :obj:`requires_consumer`. When this option is set to :obj:`True`,
    the registered callback should accept a reference to the
    :class:`~Consumer` instance as its leading argument:

    Example:
        .. code-block:: python

            @response_handler(requires_consumer=True)
            def raise_for_status(consumer, response):
                ...

    .. versionadded:: 0.4.0
    """
    # All behavior is inherited: _BaseHandlerAnnotation provides the
    # two decorator forms and hooks.ResponseHandler the hook contract.
# noinspection PyPep8Naming
class error_handler(_BaseHandlerAnnotation, hooks.ExceptionHandler):
    """
    A decorator for creating custom error handlers.

    To register a function as a custom error handler, decorate the
    function with this class. The decorated function should accept three
    positional arguments: (1) the type of the exception, (2) the
    exception instance raised, and (3) a traceback instance.

    Example:
        .. code-block:: python

            @error_handler
            def raise_api_error(exc_type, exc_val, exc_tb):
                # wrap client error with custom API error
                ...

    Then, to apply custom error handling to a request method, simply
    decorate the method with the registered error handler:

    Example:
        .. code-block:: python

            @raise_api_error
            @get("/user/posts")
            def get_posts(self):
                \"""Fetch all posts for the current users.\"""

    To apply custom error handling on all request methods of a
    :py:class:`uplink.Consumer` subclass, simply decorate the class with
    the registered error handler:

    Example:
        .. code-block:: python

            @raise_api_error
            class GitHub(Consumer):
                ...

    Lastly, the decorator supports the optional argument
    :obj:`requires_consumer`. When this option is set to :obj:`True`,
    the registered callback should accept a reference to the
    :class:`~Consumer` instance as its leading argument:

    Example:
        .. code-block:: python

            @error_handler(requires_consumer=True)
            def raise_api_error(consumer, exc_type, exc_val, exc_tb):
                ...

    .. versionadded:: 0.4.0

    Note:
        Error handlers can not completely suppress exceptions. The
        original exception is thrown if the error handler doesn't throw
        anything.
    """
    # All behavior is inherited: _BaseHandlerAnnotation provides the
    # two decorator forms and hooks.ExceptionHandler the hook contract.
# noinspection PyPep8Naming
class inject(_InjectableMethodAnnotation, hooks.TransactionHookChain):
    """
    A decorator that applies one or more hooks to a request method.

    .. versionadded:: 0.4.0
    """
    # Applying the decorator registers this chain on the request builder
    # via _InjectableMethodAnnotation.modify_request; hook aggregation is
    # presumably handled by hooks.TransactionHookChain — confirm there.
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Common classes for Simian unit tests.
Contents:
RequestHandlerTest
"""
import tests.appenginesdk
import mox
import stubout
from google.appengine.ext import testbed
from google.apputils import app
from google.apputils import basetest
from tests.simian.mac.common import test_base as test_base
from simian import settings
from simian.mac import models
from simian.mac.common import auth
class GenericContainer(test_base.GenericContainer):
    """Generic data container for use in unit tests."""
class RequestHandlerTest(test_base.RequestHandlerTest):
    """Test class for RequestHandler derived classes.

    Provides an activated App Engine testbed plus a family of MockDo*
    and MockModel* helpers that stub auth and datastore-model calls via
    mox record/replay. The `_set_mock` dict (from the base class)
    ensures each target is stubbed at most once per test.
    """

    def setUp(self):
        """Activate a testbed with a signed-in, non-admin user and stubs."""
        super(RequestHandlerTest, self).setUp()
        self.testbed = testbed.Testbed()
        self.testbed.setup_env(
        USER_EMAIL='user@example.com',
        USER_ID='123',
        USER_IS_ADMIN='0',
        DEFAULT_VERSION_HOSTNAME='example.appspot.com')
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_taskqueue_stub()
        self.testbed.init_user_stub()
        self.testbed.init_mail_stub()
        settings.ADMINS = ['admin@example.com']

    def tearDown(self):
        """Deactivate the testbed so service stubs don't leak."""
        super(RequestHandlerTest, self).tearDown()
        self.testbed.deactivate()

    def MockDoUserAuth(self, user=None, is_admin=None, fail=False):
        """Mock calling auth.DoUserAuth().

        Args:
          user: user for DoUserAuth to return.
          is_admin: optional bool; when set, the is_admin= kwarg is expected.
          fail: bool, whether to fail or not
        """
        # Stub DoUserAuth only once per test run.
        if not 'authDoUserAuth' in self._set_mock:
            self.mox.StubOutWithMock(auth, 'DoUserAuth')
            self._set_mock['authDoUserAuth'] = 1
        if fail:
            if is_admin is None:
                auth.DoUserAuth().AndRaise(auth.NotAuthenticated)
            else:
                auth.DoUserAuth(is_admin=is_admin).AndRaise(auth.NotAuthenticated)
        else:
            if is_admin is None:
                auth.DoUserAuth().AndReturn(user)
            else:
                auth.DoUserAuth(is_admin=is_admin).AndReturn(user)

    def MockDoOAuthAuth(self, user=None, is_admin=None, fail=False):
        """Mock calling auth.DoOAuthAuth().

        Args:
          user: user for DoOAuthAuth to return.
          is_admin: optional bool; when set, the is_admin= kwarg is expected.
          fail: bool, whether to fail or not
        """
        # Stub DoOAuthAuth only once per test run.
        if not 'authDoOAuthAuth' in self._set_mock:
            self.mox.StubOutWithMock(auth, 'DoOAuthAuth')
            self._set_mock['authDoOAuthAuth'] = 1
        if fail:
            if is_admin is None:
                auth.DoOAuthAuth().AndRaise(auth.NotAuthenticated)
            else:
                auth.DoOAuthAuth(is_admin=is_admin).AndRaise(auth.NotAuthenticated)
        else:
            if is_admin is None:
                auth.DoOAuthAuth().AndReturn(user)
            else:
                auth.DoOAuthAuth(is_admin=is_admin).AndReturn(user)

    def MockDoMunkiAuth(self, fail=False, and_return=None, **kwargs):
        """Mock calling gaeserver.DoMunkiAuth().

        Args:
          fail: bool, whether to fail or not; calls AndRaise()
          and_return: any, variable to pass to AndReturn, default None
          kwargs: other options, like require_level=int
        """
        # The gaeserver module hangs off the module under test.
        munki_auth_module = self.GetTestClassModule().gaeserver
        if not hasattr(munki_auth_module, 'DoMunkiAuth'):
            raise NotImplementedError('MockDoMunkiAuth for non-Munki handler class')
        if not 'authDoMunkiAuth' in self._set_mock:
            self.mox.StubOutWithMock(munki_auth_module, 'DoMunkiAuth')
            self._set_mock['authDoMunkiAuth'] = 1
        if fail:
            self.GetTestClassModule().gaeserver.DoMunkiAuth(**kwargs).AndRaise(
                munki_auth_module.NotAuthenticated)
        else:
            self.GetTestClassModule().gaeserver.DoMunkiAuth(**kwargs).AndReturn(
                and_return)

    def MockDoAnyAuth(self, fail=False, and_return=None):
        """Mock calling auth.DoAnyAuth().

        Args:
          fail: bool, whether to fail or not
          and_return: any, variable to pass to AndReturn, default None
        """
        if not 'authDoAnyAuth' in self._set_mock:
            self.mox.StubOutWithMock(auth, 'DoAnyAuth')
            self._set_mock['authDoAnyAuth'] = 1
        if fail:
            auth.DoAnyAuth().AndRaise(auth.NotAuthenticated)
        else:
            auth.DoAnyAuth().AndReturn(and_return)

    def MockModelStaticBase(self, model_name, method_name, *args):
        """Mock a model static method, return a mock setup.

        Args:
          model_name: str, name of model
          method_name: str, name of static method on model to call
          *args: optional, list of arguments to supply to mock setup
        Returns:
          a mock setup ready for completion with AndReturn, AndRaise, etc.
        """
        test_class_models = self.GetTestClassModule().models
        model_class = getattr(test_class_models, model_name)
        # Replace the static method with a mock once per (model, method).
        if not '%s:%s' % (model_name, method_name) in self._set_mock:
            mock_model = self.mox.CreateMock(getattr(model_class, method_name))
            self.stubs.Set(model_class, method_name, mock_model)
            self._set_mock['%s:%s' % (model_name, method_name)] = mock_model
        # NOTE(review): `model` below is never used — looks like leftover
        # code; confirm before removing.
        model = self.mox.CreateMockAnything()
        return getattr(model_class, method_name)(*args)

    def MockModelStatic(self, model_name, method_name, *args):
        """Mock a model static method, return a mocked model.

        Args:
          same as MockModelStaticBase
        Returns:
          a new mocked instance of the model
        """
        model = self.mox.CreateMockAnything()
        self.MockModelStaticBase(model_name, method_name, *args).AndReturn(model)
        return model

    def MockModelStaticNone(self, model_name, method_name, *args):
        """Mock a model static method, return None.

        Used to return "no entity" type responses from static methods.

        e.g.
        MockModelStaticNone('ModelName', 'get', 12345)

        Args:
          same as MockModelStaticBase
        Returns:
          None
        """
        model = None
        self.MockModelStaticBase(model_name, method_name, *args).AndReturn(model)
        return model

    def MockModel(self, model_name, *args, **kwargs):
        """Mock creating an instance of a model, and return the mock instance.

        Args:
          name: str, name of model, like 'Package'
          args: list, optional arguments supplied to model instantiation
          kwargs: dict, optional arguments supplied to model instantiation
        Returns:
          a new mocked instance of the model
        """
        test_class_models = self.GetTestClassModule().models
        if not 'models_%s' % model_name in self._set_mock:
            self.mox.StubOutWithMock(
                getattr(self.GetTestClassModule(), 'models'),
                model_name)
            # we need to put back any stubs which MockModelStaticBase placed
            for mock in self._set_mock:
                if mock.startswith('%s:' % model_name):
                    self.stubs.Set(
                        getattr(test_class_models, model_name),
                        mock.split(':')[1],
                        self._set_mock[mock])
            self._set_mock['models_%s' % model_name] = 1
        model = self.mox.CreateMockAnything()
        getattr(
            self.GetTestClassModule().models,
            model_name)(*args, **kwargs).AndReturn(model)
        return model
def main(unused_argv):
    """Entry point: delegate to the basetest test runner."""
    basetest.main()
|
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os
from xml.dom import pulldom
import logging
import re
log = logging.getLogger("koRNG")
log.setLevel(logging.DEBUG)
from elementtree import XMLTreeBuilder
try:
import cElementTree as ElementTree # effbot's C module
except ImportError:
log.error(
"using element tree and not cElementTree, performace will suffer")
import elementtree.ElementTree as ElementTree # effbot's pure Python module
class NamespaceParser(XMLTreeBuilder.FancyTreeBuilder):
    """Tree builder that annotates each element with namespace info.

    Splits ElementTree's universal-name form "{uri}local" into
    ``element.ns`` and ``element.tagName`` and snapshots the active
    namespace stack onto every element.
    """

    # Matches ElementTree universal names: "{namespace-uri}local-name".
    _qname = re.compile("{(.*?)}(.*)")

    def start(self, element):
        element.namespaces = self.namespaces[:]
        qn = self._qname.match(element.tag)
        if qn is not None:
            element.ns = qn.group(1)
            element.tagName = qn.group(2)
        else:
            # BUG FIX: tags without a namespace previously crashed with
            # AttributeError (match() returned None); fall back gracefully.
            element.ns = None
            element.tagName = element.tag
class rng_base_dataset:
    """Common container for parsed RELAX NG constructs.

    Tracks child elements, attributes, literal values, and the names of
    <ref>s that still need resolving against a dataset's definitions.
    """

    def __init__(self):
        self.name = None
        self.elements = []    # root level elements
        self.attributes = []
        self.values = []
        self.refs = []        # names of not-yet-resolved <ref>s

    def resolveRefs(self, dataset):
        """Fold the named definitions from ``dataset.defs`` into this node.

        References with no definition available yet are parked in
        ``dataset.ref_unresolved`` so a later pass can retry them.
        """
        for ref in list(self.refs):
            if ref not in dataset.defs:
                # No definition yet: remember who is waiting on this ref.
                dataset.ref_unresolved.setdefault(ref, []).append(self)
                continue
            target = dataset.defs[ref]
            self.refs.remove(ref)
            if target.refs:
                # Resolve the definition's own refs first (depth-first).
                target.resolveRefs(dataset)
            # Grab only the named pieces of the definition.
            self.attributes.extend(a for a in target.attributes if a.name)
            self.elements.extend(e for e in target.elements if e.name)
            self.values.extend(target.values)
class rng_dataset(rng_base_dataset):
    """Root dataset: holds every element, definition and namespace info
    collected while parsing an RNG grammar (including <include>d files).
    """

    def __init__(self):
        rng_base_dataset.__init__(self)
        self.name = "root"
        self.all_elements = {}        # element name -> element_info
        self.elements_caseless = {}   # lowercased name -> element_info
        self.defs = {}                # definition name -> definition
        self.namespace = ""
        self.datatypeLibrary = ""
        self.xmlns = ""
        self.ref_resolving = {}
        self.ref_unresolved = {}      # ref name -> list of waiting nodes
        # BUG FIX: resolveCircularRefs reads self.ref_circular, but it was
        # never initialized, so calling it raised AttributeError.
        self.ref_circular = {}

    def resolveRefs(self, dataset=None):
        """Resolve refs everywhere: self, definitions, elements, attrs."""
        if not dataset:
            dataset = self
        rng_base_dataset.resolveRefs(self, dataset)
        for d in list(self.defs.values()):
            d.resolveRefs(dataset)
        for e in list(self.all_elements.values()):
            e.resolveRefs(dataset)
        for a in self.attributes[:]:
            a.resolveRefs(dataset)
        self.resolveUnresolvedRefs()

    def resolveCircularRefs(self):
        """Retry refs that were previously flagged as circular."""
        for ref in list(self.ref_circular.keys())[:]:
            # print "resolving earlier circular reference %s"%ref
            el = self.ref_circular[ref]
            del self.ref_circular[ref]
            for e in el:
                e.resolveRefs(self)

    def resolveUnresolvedRefs(self):
        """Retry refs whose definitions were not yet parsed first time."""
        for ref in list(self.ref_unresolved.keys())[:]:
            print("resolving earlier unresolved reference %s" % ref)
            el = self.ref_unresolved[ref]
            del self.ref_unresolved[ref]
            for e in el:
                e.resolveRefs(self)

    def element_info(self, element_name):
        """Return the element_info for a name (case-insensitive), or None."""
        name = element_name.lower()
        if name in self.elements_caseless:
            return self.elements_caseless[name]
        return None

    def possible_children(self, element_name=None):
        """Names of valid child elements; top-level names when no parent."""
        if not element_name:
            return [el.name for el in self.elements]
        else:
            name = element_name.lower()
            if name not in self.elements_caseless:
                return []
            return [el.name for el in self.elements_caseless[name].elements]

    def possible_attributes(self, element_name):
        """Attribute names valid on the given element ([] if unknown)."""
        name = element_name.lower()
        if name in self.elements_caseless:
            return [a.name for a in self.elements_caseless[name].attributes]
        return []

    def possible_attribute_values(self, element_name, attribute_name):
        """Enumerated values for an attribute of an element ([] if none)."""
        el = self.element_info(element_name)
        if el:
            for a in el.attributes:
                if attribute_name == a.name:
                    return a.values
        return []

    def all_element_types(self):
        """All known element names."""
        return list(self.all_elements.keys())

    def dump(self, stream):
        """Debug dump; headers go to stdout, element details to stream."""
        print("RNG NS: %s" % self.xmlns)
        print("Namespace: %s" % self.namespace)
        print("datatypeLibrary: %s" % self.datatypeLibrary)
        print("-"*60)
        for e in self.elements:
            e.dump(stream)
        print("-"*60)
        for e in list(self.all_elements.values()):
            e.dump(stream)
        print("-"*60)
class rng_node_info(rng_base_dataset):
    """Base for dataset nodes built from a parsed XML node; the node's
    "name" attribute (if any) becomes the dataset name."""
    def __init__(self, node):
        rng_base_dataset.__init__(self)
        self.name = node.attrib.get("name")
        self._node = node  # original ElementTree node, kept for dump()
class element_info(rng_node_info):
    """An RNG <element> definition."""
    def dump(self, stream):
        """Write a human-readable summary of this element to stream."""
        attrs = []
        for n, v in list(self._node.attrib.items()):
            attrs.append('%s="%s"' % (n, v))
        stream.write("<element %s>\n" % ' '.join(attrs))
        names = [el.name for el in self.elements]
        stream.write("    children %r\n" % names)
        for attr in self.attributes:
            attr.dump(stream)
        stream.write("    refs remaining: %r\n" % self.refs)
class attribute_info(rng_node_info):
    """An RNG <attribute> definition."""
    def dump(self, stream):
        """Write this attribute's name and allowed values to stream."""
        stream.write("    attr %s %r\n" % (self.name, self.values))
class definition(rng_node_info):
    """An RNG <define> block, referenced by <ref name="...">."""
    def dump(self, stream):
        """Write a summary of this definition's contents to stream."""
        stream.write("definition %s has %d refs\n" % (
            self.name, len(self.refs)))
        names = [el.name for el in self.elements]
        stream.write("    has %d elements %r\n" % (
            len(self.elements), names))
        names = [el.name for el in self.attributes]
        stream.write("    has %d attributes %r\n" % (
            len(self.attributes), names))
        stream.write("    has %d values %r\n" % (
            len(self.values), self.values))
    def resolveRefs(self, dataset):
        # Resolve children first so our own merge picks up their results.
        for e in self.elements[:]:
            e.resolveRefs(dataset)
        for a in self.attributes[:]:
            a.resolveRefs(dataset)
        rng_node_info.resolveRefs(self, dataset)
class rng:
    """Parse a RELAX NG (.rng) schema file into an rng_dataset.

    Walks the XML tree and dispatches each tag to an optional
    ``handle_<tag>`` / ``handle_<tag>_end`` method pair.
    """

    def __init__(self, filename, dataset=None):
        if dataset is None:
            dataset = rng_dataset()
        self.dataset = dataset
        # Stack of the construct currently being built; the dataset
        # itself is the sentinel bottom entry.
        self._element_stack = [self.dataset]
        self._includes = []
        self.filename = filename
        self.parse()

    def parse(self):
        """Parse the file, build a child->parent map, resolve all refs.

        Raises:
            ValueError: if the document root is not <grammar>.
        """
        self.tree = ElementTree.parse(self.filename, NamespaceParser())
        self.root = self.tree.getroot()
        if self.root.tagName != "grammar":
            # BUG FIX: the original raised a *string*, which is a
            # TypeError in Python 3; raise a real exception instead.
            raise ValueError("Invalid RNG file [%s] root tag [%s]" % (
                self.filename, self.root.tagName))
        self.parent_map = dict((
            c, p) for p in self.tree.getiterator() for c in p)
        self.parseNode(self.root)
        self.dataset.resolveRefs()

    def parseNode(self, node):
        """Recursively dispatch handle_<tag>/handle_<tag>_end callbacks."""
        methodName = "handle_%s" % node.tagName
        if hasattr(self, methodName):
            fn = getattr(self, methodName)
            fn(node)
        for child in list(node):
            self.parseNode(child)
        methodName = "handle_%s_end" % node.tagName
        if hasattr(self, methodName):
            fn = getattr(self, methodName)
            fn(node)

    def handle_include(self, node):
        """Recursively parse an <include>d file into the same dataset."""
        # XXX handle relative dirs
        path = node.attrib.get("href")
        if not os.path.exists(path):
            path = os.path.join(os.path.dirname(self.filename), path)
        rng(path, self.dataset)

    def handle_grammar(self, node):
        """Record namespace info from the first <grammar> seen."""
        if not self.dataset.namespace:
            self.dataset.xmlns = node.attrib.get('xmlns')
            self.dataset.namespace = node.attrib.get('ns')
            self.dataset.datatypeLibrary = node.attrib.get('datatypeLibrary')

    def handle_attribute(self, node):
        self._element_stack.append(attribute_info(node))

    def handle_attribute_end(self, node):
        # attributes get added to the last item in the element stack
        attr = self._element_stack.pop()
        el = self._element_stack[-1]
        el.attributes.append(attr)

    def handle_name_end(self, node):
        """A <name> child of <attribute> supplies the attribute's name."""
        parent = self.parent_map[node]
        if node.text and parent.tagName == "attribute":
            e = self._element_stack[-1]
            e.name = node.text
            self.dataset.all_elements[node.text] = e
            self.dataset.elements_caseless[node.text.lower()] = e

    def handle_element(self, node):
        e = element_info(node)
        if e.name:
            self.dataset.all_elements[e.name] = e
            self.dataset.elements_caseless[e.name.lower()] = e
        self._element_stack.append(e)

    def handle_element_end(self, node):
        # Pop the finished element and attach it to its parent construct.
        el = self._element_stack.pop()
        self._element_stack[-1].elements.append(el)

    def handle_define(self, node):
        d = definition(node)
        self.dataset.defs[d.name] = d
        self._element_stack.append(d)

    def handle_define_end(self, node):
        d = self._element_stack.pop()

    def handle_ref(self, node):
        self._element_stack[-1].refs.append(node.attrib.get("name"))

    def handle_value(self, node):
        self._element_stack[-1].values.append(node.text)

    # Tags deliberately ignored (no handler defined, children are still
    # walked): zeroOrMore, choice, interleave, mixed, empty, notAllowed,
    # group, optional, text, div, list, data, except, oneOrMore, param.
if __name__ == "__main__":
    # Ad-hoc smoke test: parse the RNG file given on the command line,
    # or fall back to the bundled xslt.rng, then dump the dataset.
    import sys
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        machine = rng(filename)
    else:
        import os
        import sys
        # we're in src/python-sitelib, we need the contrib dir
        basedir = os.path.dirname(os.path.dirname(os.getcwd()))
        filename = os.path.join(
            basedir, "contrib", "catalogs", "rng", "xslt.rng")
        machine = rng(filename)
        # assert "template" in machine.possible_children("stylesheet")
        # assert "text" in machine.all_element_types()
        # assert machine.possible_children("text")==[]
        # assert machine.possible_children("garbage")==[]
        # assert "version" in machine.possible_attributes("transform")
        # assert machine.possible_attributes("garbage")==[]
        # assert "upper-first" in machine.possible_attribute_values("sort", "case-order")
        # assert machine.possible_attribute_values("garbage", "garbage") == []
        # assert machine.possible_attribute_values("garbate", "case-order") == []
    ## filename = "..\\languages\\xhtml\\xhtml-state-machine.xml"
    ## machine = state_machine_info(filename)
    ## for element in machine.all_element_types():
    ##     if element!="#LITERAL":
    # assert "lang" in machine.possible_attributes(element), "no
    # lang on %s" % element
    machine.dataset.dump(sys.stdout)
    # machine.dataset.element_info("tr").dump(sys.stdout)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.crypto import salted_hmac
from .test_models import MessageModelFactory, BlacklistedEmailFactory
from ..models import Message, BLACKLIST_HMAC_SALT, BlacklistedEmail
class StartViewTest(TestCase):
    """Tests for the start page, which shows approved messages anonymously."""
    url = reverse('messaging:start')

    def test_renders(self):
        """Message text is shown; names/emails are never exposed here."""
        MessageModelFactory(sender_approved_public=True, sender_approved_public_named=False,
                            recipient_approved_public=True, recipient_approved_public_named=True,
                            admin_approved_public=True)
        msg = MessageModelFactory(sender_approved_public=True, sender_approved_public_named=True,
                                  recipient_approved_public=True, recipient_approved_public_named=False,
                                  admin_approved_public=True)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        # Names require approval from *both* parties; neither message has it.
        self.assertNotContains(response, msg.sender_name)
        self.assertNotContains(response, msg.recipient_name)
        self.assertContains(response, msg.message)
        # Email addresses must never be rendered publicly.
        self.assertNotContains(response, msg.sender_email)
        self.assertNotContains(response, msg.recipient_email)
class FaqViewTest(TestCase):
    """Smoke test for the static FAQ page."""
    url = reverse('messaging:faq')

    def test_renders(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
class ArchiveViewTest(TestCase):
    """Tests for the public archive listing of fully-approved messages."""
    url = reverse('messaging:archive')

    def test_renders_no_public_messages(self):
        """Messages missing any of the three approvals are excluded."""
        MessageModelFactory(sender_approved_public=True, sender_approved_public_named=True,
                            recipient_approved_public=True, recipient_approved_public_named=True,
                            admin_approved_public=False)
        MessageModelFactory(sender_approved_public=False, sender_approved_public_named=True,
                            recipient_approved_public=True, recipient_approved_public_named=True,
                            admin_approved_public=True)
        MessageModelFactory(sender_approved_public=True, sender_approved_public_named=True,
                            recipient_approved_public=False, recipient_approved_public_named=True,
                            admin_approved_public=True)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context['message_list'].count())

    def test_renders_named_messages(self):
        """With all approvals including 'named', names are shown."""
        msg = MessageModelFactory(sender_approved_public=True, sender_approved_public_named=True,
                                  recipient_approved_public=True, recipient_approved_public_named=True,
                                  admin_approved_public=True)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, msg.sender_name)
        self.assertContains(response, msg.recipient_name)
        self.assertContains(response, msg.message)
        # Email addresses are never rendered, even for named messages.
        self.assertNotContains(response, msg.sender_email)
        self.assertNotContains(response, msg.recipient_email)

    def test_renders_unnamed_messages(self):
        """If either party withheld 'named' approval, names are hidden."""
        MessageModelFactory(sender_approved_public=True, sender_approved_public_named=False,
                            recipient_approved_public=True, recipient_approved_public_named=True,
                            admin_approved_public=True)
        msg = MessageModelFactory(sender_approved_public=True, sender_approved_public_named=True,
                                  recipient_approved_public=True, recipient_approved_public_named=False,
                                  admin_approved_public=True)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, msg.sender_name)
        self.assertNotContains(response, msg.recipient_name)
        self.assertContains(response, msg.message)
        self.assertNotContains(response, msg.sender_email)
        self.assertNotContains(response, msg.recipient_email)
class BlacklistViewTest(TestCase):
    """Tests for the email-blacklist opt-out view.

    The URL carries the email plus an HMAC digest so only someone who
    received the link (not an arbitrary visitor) can blacklist an address.
    """
    url_name = 'messaging:blacklist_email'

    def setUp(self):
        self.message = MessageModelFactory()
        self.correct_digest = salted_hmac(BLACKLIST_HMAC_SALT, self.message.recipient_email).hexdigest()
        self.url_kwargs = {'email': self.message.recipient_email, 'digest': self.correct_digest}
        self.url = reverse(self.url_name, kwargs=self.url_kwargs)

    def test_renders(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)

    def test_confirm(self):
        """POST creates the blacklist entry and redirects to the start page."""
        response = self.client.post(self.url)
        self.assertRedirects(response, reverse('messaging:start'))
        obj = BlacklistedEmail.objects.get()
        self.assertEqual(obj.email, self.message.recipient_email)
        # NOTE(review): expected stripped form comes from the factory's
        # default recipient address — confirm against MessageModelFactory.
        self.assertEqual(obj.stripped_email, 'recipientrecipient@null')

    def test_validates_digest(self):
        """A digest that doesn't match the email must 404 on GET and POST."""
        self.url_kwargs['email'] = self.message.sender_email
        self.url = reverse(self.url_name, kwargs=self.url_kwargs)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 404)
        response = self.client.post(self.url)
        self.assertEqual(response.status_code, 404)
        self.assertFalse(BlacklistedEmail.objects.count())
class SendViewTest(TestCase):
    """Tests for the message submission form (send view)."""
    url = reverse('messaging:send')

    def setUp(self):
        super(SendViewTest, self).setUp()
        self.post_data = {
            'sender_name': 'sender name',
            'sender_email': 'SEN.DER+FOOBAR@erik.io',
            'recipient_name': 'recipient name',
            'recipient_email': 'recipient@erik.io',
            'message': 'message',
            'sender_named': True,
            'sender_approved_public': True,
            'sender_approved_public_named': True,
        }

    def test_renders(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)

    def test_post_valid(self):
        """A valid POST stores the message and emails a confirmation link."""
        response = self.client.post(self.url, self.post_data)
        self.assertRedirects(response, reverse('messaging:sender_confirmation_sent'))
        self.assertEqual(len(mail.outbox), 1)
        message = Message.objects.get()
        self.assertEqual(message.status, Message.STATUS.pending_sender_confirmation)
        self.assertEqual(mail.outbox[0].recipients(), [message.sender_email])
        self.assertTrue(message.identifier in mail.outbox[0].body)
        self.assertTrue(message.sender_email_token in mail.outbox[0].body)

    def test_post_invalid_conflicting_publicity(self):
        """'public named' without 'public' is rejected by the form."""
        self.post_data['sender_approved_public'] = False
        response = self.client.post(self.url, self.post_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['form'].errors), 1)
        self.assertEqual(len(mail.outbox), 0)

    def test_post_blacklisted_sender(self):
        """Blacklisted senders get the normal redirect but no email is sent."""
        BlacklistedEmailFactory(email='sender@erik.io', stripped_email='sender@erikio')
        response = self.client.post(self.url, self.post_data)
        self.assertRedirects(response, reverse('messaging:sender_confirmation_sent'))
        self.assertEqual(len(mail.outbox), 0)

    def test_post_ratelimited_sender(self):
        """Exceeding MAX_MESSAGES as sender produces a form error."""
        for i in range(settings.MAX_MESSAGES + 1):
            MessageModelFactory(sender_email='sender@erik.io', sender_email_stripped='sender@erikio')
        response = self.client.post(self.url, self.post_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['form'].errors), 1)
        self.assertEqual(len(mail.outbox), 0)

    def test_post_ratelimited_recipient(self):
        """Exceeding MAX_MESSAGES toward one recipient produces a form error."""
        for i in range(settings.MAX_MESSAGES + 1):
            # BUG FIX: fixtures previously used recipient_email='sender@erik.io'
            # (copy-paste from the sender test) while the stripped field said
            # 'recipient@erikio'; make the raw and stripped addresses agree.
            MessageModelFactory(recipient_email='recipient@erik.io', recipient_email_stripped='recipient@erikio')
        response = self.client.post(self.url, self.post_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['form'].errors), 1)
        self.assertEqual(len(mail.outbox), 0)
class MessageSentViewTest(TestCase):
    """Smoke test for the 'confirmation sent' page."""
    url = reverse('messaging:sender_confirmation_sent')

    def test_renders(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
class MessageSenderConfirmationView(TestCase):
    """Tests for the sender's email-confirmation link."""
    url_name = 'messaging:sender_confirm'

    def setUp(self):
        self.message = MessageModelFactory(sender_email_token='a-b-c', status=Message.STATUS.pending_sender_confirmation)
        url_kwargs = {'identifier': self.message.identifier, 'token': self.message.sender_email_token}
        self.url = reverse(self.url_name, kwargs=url_kwargs)

    def test_confirm_anonymous(self):
        """Confirming an anonymous message mails the recipient without names."""
        response = self.client.get(self.url)
        self.assertRedirects(response, reverse('messaging:sender_confirmed'))
        self.assertEqual(len(mail.outbox), 1)
        self.message.refresh_from_db()
        self.assertEqual(self.message.status, Message.STATUS.sent)
        self.assertEqual(mail.outbox[0].recipients(), [self.message.recipient_email])
        self.assertFalse(self.message.sender_name in mail.outbox[0].body)
        self.assertFalse(self.message.sender_email in mail.outbox[0].body)
        self.assertTrue(self.message.identifier in mail.outbox[0].body)
        self.assertTrue(self.message.recipient_email_token in mail.outbox[0].body)

    def test_confirm_named(self):
        """Confirming a named message includes the sender's name."""
        self.message.sender_named = True
        self.message.save()
        response = self.client.get(self.url)
        self.assertRedirects(response, reverse('messaging:sender_confirmed'))
        self.assertEqual(len(mail.outbox), 1)
        self.message.refresh_from_db()
        # BUG FIX: was assertTrue(a, b), which only checked truthiness of the
        # first argument (the second became the failure message) and so never
        # compared the recipients; use assertEqual like test_confirm_anonymous.
        self.assertEqual(mail.outbox[0].recipients(), [self.message.recipient_email])
        self.assertTrue(self.message.sender_name in mail.outbox[0].body)
        self.assertTrue(self.message.identifier in mail.outbox[0].body)

    def test_bad_token(self):
        """A wrong sender token renders the 'not found' state."""
        self.message.sender_email_token = 'o-t-h-e-r'
        self.message.recipient_email_token = 'a-b-c'
        self.message.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context['not_found'])

    def test_bad_status(self):
        """Re-confirming an already-sent message shows 'already confirmed'."""
        self.message.status = Message.STATUS.sent
        self.message.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context['already_confirmed'])

    def test_confirm_blacklisted_recipient(self):
        """Blacklisted recipients are silently skipped (no email sent)."""
        BlacklistedEmailFactory(email='recipient@erik.io', stripped_email='recipientrecipient@null')
        response = self.client.get(self.url)
        self.assertRedirects(response, reverse('messaging:sender_confirmed'))
        self.assertEqual(len(mail.outbox), 0)
class MessageSenderConfirmedView(TestCase):
    """Smoke test for the 'sender confirmed' page."""
    url = reverse('messaging:sender_confirmed')

    def test_renders(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
class MessageRecipientMessageUpdate(TestCase):
    """Tests for the recipient's message-settings update view."""
    url_name = 'messaging:recipient_message_update'

    def setUp(self):
        self.message = MessageModelFactory(recipient_email_token='a-b-c', status=Message.STATUS.sent)
        url_kwargs = {'identifier': self.message.identifier, 'token': self.message.recipient_email_token}
        self.url = reverse(self.url_name, kwargs=url_kwargs)

    def test_confirm(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)

    def test_post_valid(self):
        """Recipient can grant public approval."""
        self.assertFalse(self.message.recipient_approved_public)
        response = self.client.post(self.url, {'recipient_approved_public': True})
        self.assertRedirects(response, self.url)
        self.message.refresh_from_db()
        self.assertTrue(self.message.recipient_approved_public)

    def test_post_invalid(self):
        """'public named' without 'public' is rejected and not persisted."""
        self.assertFalse(self.message.recipient_approved_public_named)
        response = self.client.post(self.url, {'recipient_approved_public_named': True})
        self.assertEqual(response.status_code, 200)
        self.message.refresh_from_db()
        self.assertFalse(self.message.recipient_approved_public_named)

    def test_bad_token(self):
        """A wrong recipient token (even a valid sender token) must 404."""
        self.message.sender_email_token = 'a-b-c'
        self.message.recipient_email_token = 'o-t-h-e-r'
        self.message.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 404)

    def test_bad_status(self):
        """The view is only reachable for messages that were actually sent."""
        self.message.status = Message.STATUS.pending_sender_confirmation
        self.message.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 404)
|
|
#!/usr/bin/env python3
#
# Copyright 2016 Red Hat, Inc.
#
# Authors:
# Fam Zheng <famz@redhat.com>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
from django.conf.urls import url
from django.http import HttpResponse, Http404
from django.urls import reverse
from django.core.exceptions import PermissionDenied
from django.template import Template, Context
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.conf import settings
from mod import PatchewModule
import smtplib
import email
import email.utils
import uuid
from api.models import Message, Project
from event import register_handler, get_events_info
import schema
_default_config = """
[smtp]
server = smtp.example.com
ssl = True
port = 465
username = youruser
password = yourpassword
from = your@email.com
"""
class DebugSMTP(object):
    """Stand-in SMTP client used under settings.DEBUG: logs instead of sending."""
    def sendmail(*args):
        # BUG FIX: message said "SMPT"; corrected to "SMTP".
        # NOTE: *args deliberately swallows self plus the sendmail arguments.
        print("SMTP: debug mode, not sending\n" + "\n".join([str(x) for x in args]))
class EmailModule(PatchewModule):
    # BUG FIX: this text was previously a bare parenthesized expression
    # (a string concatenation is not a docstring), so the module's help
    # text was silently discarded.  Assign __doc__ explicitly instead.
    __doc__ = (
        """
Documentation
-------------
Email information is configured in "INI" style:
"""
        + _default_config
    )

    name = "email"  # The notify method name
    default_config = _default_config

    # Schema describing one configurable email notification.
    email_schema = schema.ArraySchema(
        "{name}",
        "Email Notification",
        desc="Email notification",
        members=[
            schema.EnumSchema(
                "event",
                "Event",
                enums=lambda: get_events_info(),
                required=True,
                desc="Which event to trigger the email notification",
            ),
            schema.BooleanSchema(
                "enabled", "Enabled", desc="Whether this event is enabled", default=True
            ),
            schema.BooleanSchema(
                "reply_to_all",
                "Reply to all",
                desc='If set, Cc all the receipients of the email message associated to the event. Also, if set the original sender of the email message will be a recipient even if the "to" field is nonempty',
                default=False,
            ),
            schema.BooleanSchema(
                "in_reply_to",
                "Set In-Reply-To",
                desc="Whether to set In-Reply-To to the message id, if the event has an associated email message",
                default=True,
            ),
            schema.BooleanSchema(
                "set_reply_to",
                "Set Reply-To",
                desc="Whether to set Reply-To to the project mailing list, if the event has an associated email message",
                default=True,
            ),
            schema.BooleanSchema(
                "reply_subject",
                "Set replying subject",
                desc='Whether to set Subject to "Re: xxx", if the event has an associated email message',
                default=True,
            ),
            schema.BooleanSchema(
                "to_user",
                "Send to user",
                desc="Whether to set To to a user email, if the event has an associated user",
                default=False,
            ),
            schema.StringSchema("to", "To", desc="Send email to"),
            schema.StringSchema("cc", "Cc", desc="Cc list"),
            schema.StringSchema(
                "subject_template",
                "Subject template",
                desc="""The django template for subject""",
                required=True,
            ),
            schema.StringSchema(
                "body_template",
                "Body template",
                desc="The django template for email body.",
                multiline=True,
                required=True,
            ),
        ],
    )

    project_config_schema = schema.ArraySchema(
        "email",
        desc="Configuration for email module",
        members=[
            schema.MapSchema(
                "notifications",
                "Email notifications",
                desc="Email notifications",
                item=email_schema,
            )
        ],
    )

    def __init__(self):
        # Subscribe to every event; filtering happens in on_event.
        register_handler(None, self.on_event)

    def _get_smtp(self):
        """Build an SMTP client from the [smtp] config (or a debug stub)."""
        server = self.get_config("smtp", "server")
        port = self.get_config("smtp", "port")
        username = self.get_config("smtp", "username")
        password = self.get_config("smtp", "password")
        ssl = self.get_config("smtp", "ssl", "getboolean")
        if settings.DEBUG:
            return DebugSMTP()
        elif ssl:
            smtp = smtplib.SMTP_SSL(server, port)
        else:
            smtp = smtplib.SMTP(server, port)
        if self.get_config("smtp", "auth", "getboolean"):
            smtp.login(username, password)
        return smtp

    def _send_series_recurse(self, sendmethod, s):
        """Apply sendmethod to a message and, recursively, all its replies."""
        sendmethod(s)
        for i in s.get_replies():
            self._send_series_recurse(sendmethod, i)

    def _smtp_send(self, to, cc, message):
        """Rewrite From/To/Cc on an email.message and deliver it via SMTP."""
        from_addr = self.get_config("smtp", "from")
        # Preserve the original sender before we overwrite From below.
        message["Resent-From"] = message["From"]
        for k, v in [("From", from_addr), ("To", to), ("Cc", cc)]:
            if not v:
                continue
            if isinstance(v, list):
                v = ", ".join(v)
            try:
                message.replace_header(k, v)
            except KeyError:
                # Header did not exist yet; add it instead of replacing.
                message[k] = v
        smtp = self._get_smtp()
        recipients = []
        for x in [to, cc]:
            if not x:
                continue
            if isinstance(x, str):
                recipients += [x]
            elif isinstance(x, list):
                recipients += x
        smtp.sendmail(from_addr, recipients, message.as_string())

    @method_decorator(require_POST)
    def www_view_email_bounce(self, request, message_id):
        """Re-send a whole series to the logged-in user's own address."""
        if not request.user.is_authenticated:
            raise PermissionDenied()
        m = Message.objects.find_series(message_id)
        if not m:
            raise Http404("Series not found: " + message_id)

        def send_one(m):
            msg = m.get_mbox()
            message = email.message_from_string(msg)
            self._smtp_send(request.user.email, None, message)

        self._send_series_recurse(send_one, m)
        return HttpResponse("email bounced")

    def www_url_hook(self, urlpatterns):
        urlpatterns.append(
            url(
                r"^email-bounce/(?P<message_id>.*)/",
                self.www_view_email_bounce,
                name="email-bounce",
            )
        )

    def prepare_message_hook(self, request, message, detailed):
        """Add a 'Bounce to me' action for series heads on detail pages."""
        if not detailed:
            return
        if (
            message.is_series_head
            and request.user.is_authenticated
            and request.user.email
        ):
            message.extra_ops.append(
                {
                    "url": reverse(
                        "email-bounce", kwargs={"message_id": message.message_id}
                    ),
                    "icon": "share",
                    "title": "Bounce to me",
                }
            )

    def _sections_by_event(self, event):
        """Yield legacy '[mail ...]' config sections matching an event."""
        conf = self.get_config_obj()
        for sec in conf.sections():
            if sec.startswith("mail ") and conf.get(sec, "event") == event:
                yield sec

    def _send_email(self, to, cc, headers, body):
        message = email.message.Message()
        for k, v in headers.items():
            message[k] = v
        message.set_payload(body, charset="utf-8")
        self._smtp_send(to, cc, message)

    def gen_message_id(self):
        return "<%s@patchew.org>" % uuid.uuid1()

    def get_notifications(self, project):
        return self.get_project_config(project).get("notifications", {})

    def on_event(self, event, **params):
        """Render and send every enabled notification matching this event."""
        class EmailCancelled(Exception):
            pass

        # Locate the project (and optional message) the event refers to.
        po = None
        mo = None
        for v in list(params.values()):
            if isinstance(v, Message):
                mo = v
                po = mo.project
                break
            elif isinstance(v, Project):
                po = v
                break
        if not po:
            return
        for nt in list(self.get_notifications(po).values()):
            headers = {}
            if not nt["enabled"]:
                continue
            if nt["event"] != event:
                continue

            def cancel_email():
                # Templates may call {{ cancel }} to abort this notification.
                raise EmailCancelled

            params["cancel"] = cancel_email
            ctx = Context(params, autoescape=False)
            try:
                subject = Template(nt["subject_template"]).render(ctx).strip()
                body = Template(nt["body_template"]).render(ctx).strip()
                to = [x.strip() for x in Template(nt["to"]).render(ctx).strip().split()]
                cc = [x.strip() for x in Template(nt["cc"]).render(ctx).strip().split()]
            except EmailCancelled:
                continue
            if mo:
                if nt["reply_to_all"] or not len(to):
                    to += [mo.get_sender_addr()]
                if nt["reply_to_all"]:
                    cc += [x[1] for x in mo.recipients]
            if mo and nt["in_reply_to"]:
                headers["In-Reply-To"] = "<%s>" % mo.message_id
            if mo and nt["set_reply_to"]:
                headers["Reply-To"] = "<%s>" % mo.project.mailing_list
            if nt["reply_subject"] and mo:
                subject = (
                    "Re: " + mo.subject
                    if not mo.subject.startswith("Re:")
                    else mo.subject
                )
            if nt["to_user"] and "user" in params and params["user"].email:
                # BUG FIX: "to += email" extended the list with the address
                # one *character* at a time (str is iterable); append the
                # whole address instead.
                to.append(params["user"].email)
            if not (subject and body and (to or cc)):
                continue
            headers["Subject"] = subject
            headers["Message-ID"] = email.utils.make_msgid()
            self._send_email(to, cc, headers, body)

    def prepare_project_hook(self, request, project):
        if not project.maintained_by(request.user):
            return
        project.extra_info.append(
            {
                "title": "Email notifications",
                "class": "info",
                "content_html": self.build_config_html(request, project),
            }
        )
|
|
"""Test to verify that Home Assistant core works."""
# pylint: disable=protected-access
import asyncio
import logging
import os
import unittest
from unittest.mock import patch, MagicMock
from datetime import datetime, timedelta
from tempfile import TemporaryDirectory
import voluptuous as vol
import pytz
import pytest
import homeassistant.core as ha
from homeassistant.exceptions import (InvalidEntityFormatError,
InvalidStateError)
from homeassistant.util.async_ import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import (METRIC_SYSTEM)
from homeassistant.const import (
__version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM,
ATTR_NOW, EVENT_TIME_CHANGED, EVENT_TIMER_OUT_OF_SYNC, ATTR_SECONDS,
EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_CLOSE,
EVENT_SERVICE_REGISTERED, EVENT_SERVICE_REMOVED, EVENT_CALL_SERVICE)
from tests.common import get_test_home_assistant, async_mock_service
PST = pytz.timezone('America/Los_Angeles')
def test_split_entity_id():
    """Test split_entity_id splits 'domain.object_id' into its two parts."""
    assert ha.split_entity_id('domain.object_id') == ['domain', 'object_id']
def test_async_add_job_schedule_callback():
    """Test that a @callback-decorated job is scheduled via call_soon."""
    hass = MagicMock()
    job = MagicMock()
    ha.HomeAssistant.async_add_job(hass, ha.callback(job))
    # Callbacks run directly on the event loop: call_soon only, no task,
    # no thread-pool delegation.
    assert len(hass.loop.call_soon.mock_calls) == 1
    assert len(hass.loop.create_task.mock_calls) == 0
    assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=True)
def test_async_add_job_schedule_coroutinefunction(mock_iscoro):
    """Test that a coroutine function is scheduled as an event-loop task."""
    hass = MagicMock()
    job = MagicMock()
    ha.HomeAssistant.async_add_job(hass, job)
    # Coroutine functions become tasks: create_task only.
    assert len(hass.loop.call_soon.mock_calls) == 0
    assert len(hass.loop.create_task.mock_calls) == 1
    assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=False)
def test_async_add_job_add_threaded_job_to_pool(mock_iscoro):
    """Test that a plain (non-coroutine) job goes to the executor pool."""
    hass = MagicMock()
    job = MagicMock()
    ha.HomeAssistant.async_add_job(hass, job)
    # Plain callables are delegated to the thread pool: run_in_executor.
    assert len(hass.loop.call_soon.mock_calls) == 0
    assert len(hass.loop.create_task.mock_calls) == 0
    assert len(hass.loop.run_in_executor.mock_calls) == 1
@patch('asyncio.iscoroutine', return_value=True)
def test_async_create_task_schedule_coroutine(mock_iscoro):
    """Test that async_create_task schedules a coroutine as a task."""
    hass = MagicMock()
    job = MagicMock()
    ha.HomeAssistant.async_create_task(hass, job)
    assert len(hass.loop.call_soon.mock_calls) == 0
    assert len(hass.loop.create_task.mock_calls) == 1
    assert len(hass.add_job.mock_calls) == 0
def test_async_run_job_calls_callback():
    """A job annotated as callback runs synchronously, not via add_job."""
    mock_hass = MagicMock()
    invocations = []
    def job():
        invocations.append(1)
    ha.HomeAssistant.async_run_job(mock_hass, ha.callback(job))
    assert len(invocations) == 1
    assert len(mock_hass.async_add_job.mock_calls) == 0
def test_async_run_job_delegates_non_async():
    """A non-callback job is delegated to async_add_job, not run inline."""
    mock_hass = MagicMock()
    invocations = []
    def job():
        invocations.append(1)
    ha.HomeAssistant.async_run_job(mock_hass, job)
    assert len(invocations) == 0
    assert len(mock_hass.async_add_job.mock_calls) == 1
def test_stage_shutdown():
    """Shut down a running instance and check stop/close events each fire once."""
    hass = get_test_home_assistant()
    stop_events = []
    close_events = []
    all_events = []
    hass.bus.listen(
        EVENT_HOMEASSISTANT_STOP, lambda event: stop_events.append(event))
    hass.bus.listen(
        EVENT_HOMEASSISTANT_CLOSE, lambda event: close_events.append(event))
    hass.bus.listen('*', lambda event: all_events.append(event))
    hass.stop()
    # Each stage of the shutdown fires its event exactly once.
    assert len(stop_events) == 1
    assert len(close_events) == 1
    assert len(all_events) == 1
class TestHomeAssistant(unittest.TestCase):
    """Test the Home Assistant core classes."""
    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()
    def test_pending_sheduler(self):
        """Add a coro to pending tasks."""
        call_count = []
        @asyncio.coroutine
        def test_coro():
            """Test Coro."""
            call_count.append('call')
        for _ in range(3):
            self.hass.add_job(test_coro())
        # Block this (non-loop) thread until all three coroutines finish.
        run_coroutine_threadsafe(
            asyncio.wait(self.hass._pending_tasks, loop=self.hass.loop),
            loop=self.hass.loop
        ).result()
        # Finished tasks stay in _pending_tasks until cleaned up, so all
        # three are still tracked even though every coroutine has run.
        assert len(self.hass._pending_tasks) == 3
        assert len(call_count) == 3
    def test_async_add_job_pending_tasks_coro(self):
        """Add a coro to pending tasks."""
        call_count = []
        @asyncio.coroutine
        def test_coro():
            """Test Coro."""
            call_count.append('call')
        for _ in range(2):
            self.hass.add_job(test_coro())
        @asyncio.coroutine
        def wait_finish_callback():
            """Wait until all stuff is scheduled."""
            # Two zero-sleeps: give the loop two turns so jobs queued via
            # call_soon_threadsafe are actually scheduled as tasks.
            yield from asyncio.sleep(0, loop=self.hass.loop)
            yield from asyncio.sleep(0, loop=self.hass.loop)
        run_coroutine_threadsafe(
            wait_finish_callback(), self.hass.loop).result()
        # Both coroutines are tracked as pending tasks.
        assert len(self.hass._pending_tasks) == 2
        self.hass.block_till_done()
        assert len(call_count) == 2
    def test_async_add_job_pending_tasks_executor(self):
        """Run an executor in pending tasks."""
        call_count = []
        def test_executor():
            """Test executor."""
            call_count.append('call')
        @asyncio.coroutine
        def wait_finish_callback():
            """Wait until all stuff is scheduled."""
            yield from asyncio.sleep(0, loop=self.hass.loop)
            yield from asyncio.sleep(0, loop=self.hass.loop)
        for _ in range(2):
            self.hass.add_job(test_executor)
        run_coroutine_threadsafe(
            wait_finish_callback(), self.hass.loop).result()
        # Executor jobs are also tracked in _pending_tasks.
        assert len(self.hass._pending_tasks) == 2
        self.hass.block_till_done()
        assert len(call_count) == 2
    def test_async_add_job_pending_tasks_callback(self):
        """Run a callback in pending tasks."""
        call_count = []
        @ha.callback
        def test_callback():
            """Test callback."""
            call_count.append('call')
        @asyncio.coroutine
        def wait_finish_callback():
            """Wait until all stuff is scheduled."""
            yield from asyncio.sleep(0, loop=self.hass.loop)
            yield from asyncio.sleep(0, loop=self.hass.loop)
        for _ in range(2):
            self.hass.add_job(test_callback)
        run_coroutine_threadsafe(
            wait_finish_callback(), self.hass.loop).result()
        self.hass.block_till_done()
        # Callbacks run inline on the loop, so they never appear as
        # pending tasks -- unlike coroutines and executor jobs above.
        assert len(self.hass._pending_tasks) == 0
        assert len(call_count) == 2
    def test_add_job_with_none(self):
        """Try to add a job with None as function."""
        with pytest.raises(ValueError):
            self.hass.add_job(None, 'test_arg')
class TestEvent(unittest.TestCase):
    """Tests for the ha.Event class."""

    def test_eq(self):
        """Two events built from identical fields compare equal."""
        now = dt_util.utcnow()
        data = {'some': 'attr'}
        context = ha.Context()
        event1 = ha.Event('some_type', data, time_fired=now, context=context)
        event2 = ha.Event('some_type', data, time_fired=now, context=context)
        assert event1 == event2

    def test_repr(self):
        """repr encodes the origin marker ([L]/[R]) and the event data."""
        assert "<Event TestEvent[L]>" == \
            str(ha.Event("TestEvent"))
        assert "<Event TestEvent[R]: beer=nice>" == \
            str(ha.Event("TestEvent",
                         {"beer": "nice"},
                         ha.EventOrigin.remote))

    def test_as_dict(self):
        """as_dict exposes type, data, origin, fire time and context."""
        event_type = 'some_type'
        now = dt_util.utcnow()
        data = {'some': 'attr'}
        event = ha.Event(event_type, data, ha.EventOrigin.local, now)
        assert event.as_dict() == {
            'event_type': event_type,
            'data': data,
            'origin': 'LOCAL',
            'time_fired': now,
            'context': {
                'id': event.context.id,
                'user_id': event.context.user_id,
            },
        }
class TestEventBus(unittest.TestCase):
    """Test EventBus methods."""
    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.bus = self.hass.bus
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop down stuff we started."""
        self.hass.stop()
    def test_add_remove_listener(self):
        """Test remove_listener method."""
        self.hass.allow_pool = False
        old_count = len(self.bus.listeners)
        def listener(_): pass
        unsub = self.bus.listen('test', listener)
        assert old_count + 1 == len(self.bus.listeners)
        # Remove listener
        unsub()
        assert old_count == len(self.bus.listeners)
        # Should do nothing now -- a second unsubscribe must be a no-op.
        unsub()
    def test_unsubscribe_listener(self):
        """Test unsubscribe listener from returned function."""
        calls = []
        @ha.callback
        def listener(event):
            """Mock listener."""
            calls.append(event)
        unsub = self.bus.listen('test', listener)
        self.bus.fire('test')
        self.hass.block_till_done()
        assert len(calls) == 1
        unsub()
        # After unsubscribing, further events must not reach the listener.
        self.bus.fire('event')
        self.hass.block_till_done()
        assert len(calls) == 1
    def test_listen_once_event_with_callback(self):
        """Test listen_once_event method."""
        runs = []
        @ha.callback
        def event_handler(event):
            runs.append(event)
        self.bus.listen_once('test_event', event_handler)
        self.bus.fire('test_event')
        # Second time it should not increase runs
        self.bus.fire('test_event')
        self.hass.block_till_done()
        assert 1 == len(runs)
    def test_listen_once_event_with_coroutine(self):
        """Test listen_once_event method."""
        runs = []
        @asyncio.coroutine
        def event_handler(event):
            runs.append(event)
        self.bus.listen_once('test_event', event_handler)
        self.bus.fire('test_event')
        # Second time it should not increase runs
        self.bus.fire('test_event')
        self.hass.block_till_done()
        assert 1 == len(runs)
    def test_listen_once_event_with_thread(self):
        """Test listen_once_event method."""
        runs = []
        def event_handler(event):
            runs.append(event)
        self.bus.listen_once('test_event', event_handler)
        self.bus.fire('test_event')
        # Second time it should not increase runs
        self.bus.fire('test_event')
        self.hass.block_till_done()
        assert 1 == len(runs)
    def test_thread_event_listener(self):
        """Test thread event listener."""
        # Plain function: dispatched to the executor thread pool.
        thread_calls = []
        def thread_listener(event):
            thread_calls.append(event)
        self.bus.listen('test_thread', thread_listener)
        self.bus.fire('test_thread')
        self.hass.block_till_done()
        assert len(thread_calls) == 1
    def test_callback_event_listener(self):
        """Test callback event listener."""
        # @ha.callback: runs inline on the event loop.
        callback_calls = []
        @ha.callback
        def callback_listener(event):
            callback_calls.append(event)
        self.bus.listen('test_callback', callback_listener)
        self.bus.fire('test_callback')
        self.hass.block_till_done()
        assert len(callback_calls) == 1
    def test_coroutine_event_listener(self):
        """Test coroutine event listener."""
        # Coroutine: scheduled as a task on the event loop.
        coroutine_calls = []
        @asyncio.coroutine
        def coroutine_listener(event):
            coroutine_calls.append(event)
        self.bus.listen('test_coroutine', coroutine_listener)
        self.bus.fire('test_coroutine')
        self.hass.block_till_done()
        assert len(coroutine_calls) == 1
class TestState(unittest.TestCase):
    """Test State methods."""
    def test_init(self):
        """Test state.init."""
        # Entity ids must be 'domain.object_id'.
        with pytest.raises(InvalidEntityFormatError):
            ha.State('invalid_entity_format', 'test_state')
        # State values are limited to 255 characters; 256 must raise.
        with pytest.raises(InvalidStateError):
            ha.State('domain.long_state', 't' * 256)
    def test_domain(self):
        """Test domain."""
        state = ha.State('some_domain.hello', 'world')
        assert 'some_domain' == state.domain
    def test_object_id(self):
        """Test object ID."""
        state = ha.State('domain.hello', 'world')
        assert 'hello' == state.object_id
    def test_name_if_no_friendly_name_attr(self):
        """Test if there is no friendly name."""
        # Without a friendly_name attribute the object id is humanized
        # (underscores become spaces).
        state = ha.State('domain.hello_world', 'world')
        assert 'hello world' == state.name
    def test_name_if_friendly_name_attr(self):
        """Test if there is a friendly name."""
        name = 'Some Unique Name'
        state = ha.State('domain.hello_world', 'world',
                         {ATTR_FRIENDLY_NAME: name})
        assert name == state.name
    def test_dict_conversion(self):
        """Test conversion of dict."""
        # as_dict/from_dict must round-trip losslessly.
        state = ha.State('domain.hello', 'world', {'some': 'attr'})
        assert state == ha.State.from_dict(state.as_dict())
    def test_dict_conversion_with_wrong_data(self):
        """Test conversion with wrong data."""
        # Missing entity_id or state yields None instead of raising.
        assert ha.State.from_dict(None) is None
        assert ha.State.from_dict({'state': 'yes'}) is None
        assert ha.State.from_dict({'entity_id': 'yes'}) is None
    def test_repr(self):
        """Test state.repr."""
        assert "<state happy.happy=on @ 1984-12-08T12:00:00+00:00>" == \
            str(ha.State(
                "happy.happy", "on",
                last_changed=datetime(1984, 12, 8, 12, 0, 0)))
        assert "<state happy.happy=on; brightness=144 @ " \
               "1984-12-08T12:00:00+00:00>" == \
            str(ha.State("happy.happy", "on", {"brightness": 144},
                         datetime(1984, 12, 8, 12, 0, 0)))
class TestStateMachine(unittest.TestCase):
    """Test State machine methods."""
    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.states = self.hass.states
        # Mixed-case ids on purpose: the machine lower-cases entity ids.
        self.states.set("light.Bowl", "on")
        self.states.set("switch.AC", "off")
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop down stuff we started."""
        self.hass.stop()
    def test_is_state(self):
        """Test is_state method."""
        assert self.states.is_state('light.Bowl', 'on')
        assert not self.states.is_state('light.Bowl', 'off')
        assert not self.states.is_state('light.Non_existing', 'on')
    def test_entity_ids(self):
        """Test get_entity_ids method."""
        ent_ids = self.states.entity_ids()
        assert 2 == len(ent_ids)
        assert 'light.bowl' in ent_ids
        assert 'switch.ac' in ent_ids
        # Optional domain filter narrows the result.
        ent_ids = self.states.entity_ids('light')
        assert 1 == len(ent_ids)
        assert 'light.bowl' in ent_ids
    def test_all(self):
        """Test everything."""
        states = sorted(state.entity_id for state in self.states.all())
        assert ['light.bowl', 'switch.ac'] == states
    def test_remove(self):
        """Test remove method."""
        events = []
        @ha.callback
        def callback(event):
            events.append(event)
        self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
        assert 'light.bowl' in self.states.entity_ids()
        assert self.states.remove('light.bowl')
        self.hass.block_till_done()
        assert 'light.bowl' not in self.states.entity_ids()
        # Removal fires one state_changed event carrying the old state
        # and a None new state.
        assert 1 == len(events)
        assert 'light.bowl' == events[0].data.get('entity_id')
        assert events[0].data.get('old_state') is not None
        assert 'light.bowl' == events[0].data['old_state'].entity_id
        assert events[0].data.get('new_state') is None
        # If it does not exist, we should get False
        assert not self.states.remove('light.Bowl')
        self.hass.block_till_done()
        assert 1 == len(events)
    def test_case_insensitivty(self):
        """Test insensitivty."""
        runs = []
        @ha.callback
        def callback(event):
            runs.append(event)
        self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
        # Setting via upper-case id must update the lower-cased entity.
        self.states.set('light.BOWL', 'off')
        self.hass.block_till_done()
        assert self.states.is_state('light.bowl', 'off')
        assert 1 == len(runs)
    def test_last_changed_not_updated_on_same_state(self):
        """Test to not update the existing, same state."""
        state = self.states.get('light.Bowl')
        future = dt_util.utcnow() + timedelta(hours=10)
        with patch('homeassistant.util.dt.utcnow', return_value=future):
            # Same state value, different attributes: last_changed must
            # be preserved even though the clock moved forward.
            self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
            self.hass.block_till_done()
        state2 = self.states.get('light.Bowl')
        assert state2 is not None
        assert state.last_changed == state2.last_changed
    def test_force_update(self):
        """Test force update option."""
        events = []
        @ha.callback
        def callback(event):
            events.append(event)
        self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
        # Identical state: no event by default...
        self.states.set('light.bowl', 'on')
        self.hass.block_till_done()
        assert 0 == len(events)
        # ...but force=True fires one anyway.
        self.states.set('light.bowl', 'on', None, True)
        self.hass.block_till_done()
        assert 1 == len(events)
def test_service_call_repr():
    """repr of a ServiceCall shows domain.service, context id and data."""
    call = ha.ServiceCall('homeassistant', 'start')
    expected = "<ServiceCall homeassistant.start (c:{})>".format(
        call.context.id)
    assert str(call) == expected
    call2 = ha.ServiceCall('homeassistant', 'start', {'fast': 'yes'})
    expected2 = "<ServiceCall homeassistant.start (c:{}): fast=yes>".format(
        call2.context.id)
    assert str(call2) == expected2
class TestServiceRegistry(unittest.TestCase):
    """Test ServicerRegistry methods."""
    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.services = self.hass.services
        @ha.callback
        def mock_service(call):
            pass
        # Mixed-case registration on purpose: lookups are case-insensitive.
        self.services.register("Test_Domain", "TEST_SERVICE", mock_service)
        self.calls_register = []
        @ha.callback
        def mock_event_register(event):
            """Mock register event."""
            self.calls_register.append(event)
        self.hass.bus.listen(EVENT_SERVICE_REGISTERED, mock_event_register)
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop down stuff we started."""
        self.hass.stop()
    def test_has_service(self):
        """Test has_service method."""
        assert self.services.has_service("tesT_domaiN", "tesT_servicE")
        assert not self.services.has_service("test_domain", "non_existing")
        assert not self.services.has_service("non_existing", "test_service")
    def test_services(self):
        """Test services."""
        assert len(self.services.services) == 1
    def test_call_with_blocking_done_in_time(self):
        """Test call with blocking."""
        calls = []
        @ha.callback
        def service_handler(call):
            """Service handler."""
            calls.append(call)
        self.services.register(
            "test_domain", "register_calls", service_handler)
        self.hass.block_till_done()
        # Registration fires EVENT_SERVICE_REGISTERED with lower-cased names.
        assert len(self.calls_register) == 1
        assert self.calls_register[-1].data['domain'] == 'test_domain'
        assert self.calls_register[-1].data['service'] == 'register_calls'
        assert self.services.call('test_domain', 'REGISTER_CALLS',
                                  blocking=True)
        assert 1 == len(calls)
    def test_call_non_existing_with_blocking(self):
        """Test non-existing with blocking."""
        with pytest.raises(ha.ServiceNotFound):
            self.services.call('test_domain', 'i_do_not_exist', blocking=True)
    def test_async_service(self):
        """Test registering and calling an async service."""
        calls = []
        @asyncio.coroutine
        def service_handler(call):
            """Service handler coroutine."""
            calls.append(call)
        self.services.register(
            'test_domain', 'register_calls', service_handler)
        self.hass.block_till_done()
        assert len(self.calls_register) == 1
        assert self.calls_register[-1].data['domain'] == 'test_domain'
        assert self.calls_register[-1].data['service'] == 'register_calls'
        assert self.services.call('test_domain', 'REGISTER_CALLS',
                                  blocking=True)
        self.hass.block_till_done()
        assert 1 == len(calls)
    def test_callback_service(self):
        """Test registering and calling an async service."""
        calls = []
        @ha.callback
        def service_handler(call):
            """Service handler coroutine."""
            calls.append(call)
        self.services.register(
            'test_domain', 'register_calls', service_handler)
        self.hass.block_till_done()
        assert len(self.calls_register) == 1
        assert self.calls_register[-1].data['domain'] == 'test_domain'
        assert self.calls_register[-1].data['service'] == 'register_calls'
        assert self.services.call('test_domain', 'REGISTER_CALLS',
                                  blocking=True)
        self.hass.block_till_done()
        assert 1 == len(calls)
    def test_remove_service(self):
        """Test remove service."""
        calls_remove = []
        @ha.callback
        def mock_event_remove(event):
            """Mock register event."""
            calls_remove.append(event)
        self.hass.bus.listen(EVENT_SERVICE_REMOVED, mock_event_remove)
        assert self.services.has_service('test_Domain', 'test_Service')
        self.services.remove('test_Domain', 'test_Service')
        self.hass.block_till_done()
        assert not self.services.has_service('test_Domain', 'test_Service')
        # Removal fires EVENT_SERVICE_REMOVED with lower-cased names.
        assert len(calls_remove) == 1
        assert calls_remove[-1].data['domain'] == 'test_domain'
        assert calls_remove[-1].data['service'] == 'test_service'
    def test_remove_service_that_not_exists(self):
        """Test remove service that not exists."""
        calls_remove = []
        @ha.callback
        def mock_event_remove(event):
            """Mock register event."""
            calls_remove.append(event)
        self.hass.bus.listen(EVENT_SERVICE_REMOVED, mock_event_remove)
        assert not self.services.has_service('test_xxx', 'test_yyy')
        # Removing an unknown service is a silent no-op: no event fired.
        self.services.remove('test_xxx', 'test_yyy')
        self.hass.block_till_done()
        assert len(calls_remove) == 0
class TestConfig(unittest.TestCase):
    """Test configuration methods."""
    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.config = ha.Config()
        assert self.config.config_dir is None
    def test_path_with_file(self):
        """Test get_config_path method."""
        self.config.config_dir = '/tmp/ha-config'
        assert "/tmp/ha-config/test.conf" == \
            self.config.path("test.conf")
    def test_path_with_dir_and_file(self):
        """Test get_config_path method."""
        # path() accepts multiple segments and joins them under config_dir.
        self.config.config_dir = '/tmp/ha-config'
        assert "/tmp/ha-config/dir/test.conf" == \
            self.config.path("dir", "test.conf")
    def test_as_dict(self):
        """Test as dict."""
        self.config.config_dir = '/tmp/ha-config'
        expected = {
            'latitude': None,
            'longitude': None,
            'elevation': None,
            CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(),
            'location_name': None,
            'time_zone': 'UTC',
            'components': set(),
            'config_dir': '/tmp/ha-config',
            'whitelist_external_dirs': set(),
            'version': __version__,
        }
        assert expected == self.config.as_dict()
    def test_is_allowed_path(self):
        """Test is_allowed_path method."""
        with TemporaryDirectory() as tmp_dir:
            # The created dir is in /tmp. This is a symlink on OS X
            # causing this test to fail unless we resolve path first.
            self.config.whitelist_external_dirs = set((
                os.path.realpath(tmp_dir),
            ))
            test_file = os.path.join(tmp_dir, "test.jpg")
            with open(test_file, "w") as tmp_file:
                tmp_file.write("test")
            # Existing files, the dir itself, and even non-existing names
            # under a whitelisted dir are all allowed.
            valid = [
                test_file,
                tmp_dir,
                os.path.join(tmp_dir, 'notfound321')
            ]
            for path in valid:
                assert self.config.is_allowed_path(path)
            self.config.whitelist_external_dirs = set(('/home', '/var'))
            # '/var/../etc/passwd' checks that traversal is normalized away.
            unvalid = [
                "/hass/config/secure",
                "/etc/passwd",
                "/root/secure_file",
                "/var/../etc/passwd",
                test_file,
            ]
            for path in unvalid:
                assert not self.config.is_allowed_path(path)
            # NOTE(review): relies on an `assert` inside is_allowed_path;
            # that check disappears under `python -O` -- confirm intended.
            with pytest.raises(AssertionError):
                self.config.is_allowed_path(None)
@patch('homeassistant.core.monotonic')
def test_create_timer(mock_monotonic, loop):
    """Test create timer."""
    hass = MagicMock()
    funcs = []
    orig_callback = ha.callback
    # Capture every function the timer module decorates with @ha.callback
    # so we can drive them by hand below.
    def mock_callback(func):
        funcs.append(func)
        return orig_callback(func)
    # Successive monotonic() readings consumed by the timer.
    mock_monotonic.side_effect = 10.2, 10.8, 11.3
    with patch.object(ha, 'callback', mock_callback), \
            patch('homeassistant.core.dt_util.utcnow',
                  return_value=datetime(2018, 12, 31, 3, 4, 5, 333333)):
        ha._async_create_timer(hass)
    assert len(funcs) == 2
    fire_time_event, stop_timer = funcs
    assert len(hass.loop.call_later.mock_calls) == 1
    delay, callback, target = hass.loop.call_later.mock_calls[0][1]
    # utcnow is at .333333 s, so the next whole second is 0.666667 s away;
    # target = 10.2 (monotonic now) + that delay.
    assert abs(delay - 0.666667) < 0.001
    assert callback is fire_time_event
    assert abs(target - 10.866667) < 0.001
    with patch('homeassistant.core.dt_util.utcnow',
               return_value=datetime(2018, 12, 31, 3, 4, 6, 100000)):
        callback(target)
    assert len(hass.bus.async_listen_once.mock_calls) == 1
    assert len(hass.bus.async_fire.mock_calls) == 1
    assert len(hass.loop.call_later.mock_calls) == 2
    # The timer registers its own shutdown hook.
    event_type, callback = hass.bus.async_listen_once.mock_calls[0][1]
    assert event_type == EVENT_HOMEASSISTANT_STOP
    assert callback is stop_timer
    # Second schedule: utcnow at .1 s -> 0.9 s to the next second;
    # target = 11.3 (latest monotonic) + 0.9.
    delay, callback, target = hass.loop.call_later.mock_calls[1][1]
    assert abs(delay - 0.9) < 0.001
    assert callback is fire_time_event
    assert abs(target - 12.2) < 0.001
    # Each tick fires EVENT_TIME_CHANGED carrying the current utcnow.
    event_type, event_data = hass.bus.async_fire.mock_calls[0][1]
    assert event_type == EVENT_TIME_CHANGED
    assert event_data[ATTR_NOW] == datetime(2018, 12, 31, 3, 4, 6, 100000)
@patch('homeassistant.core.monotonic')
def test_timer_out_of_sync(mock_monotonic, loop):
    """Test create timer."""
    hass = MagicMock()
    funcs = []
    orig_callback = ha.callback
    def mock_callback(func):
        funcs.append(func)
        return orig_callback(func)
    # Monotonic jumps from 10.2 to 13.3: far beyond the scheduled tick,
    # simulating a loop that stalled.
    mock_monotonic.side_effect = 10.2, 13.3, 13.4
    with patch.object(ha, 'callback', mock_callback), \
            patch('homeassistant.core.dt_util.utcnow',
                  return_value=datetime(2018, 12, 31, 3, 4, 5, 333333)):
        ha._async_create_timer(hass)
    delay, callback, target = hass.loop.call_later.mock_calls[0][1]
    with patch('homeassistant.core.dt_util.utcnow',
               return_value=datetime(2018, 12, 31, 3, 4, 8, 200000)):
        callback(target)
    # A late tick additionally fires EVENT_TIMER_OUT_OF_SYNC reporting
    # how many seconds the timer drifted (13.3 - 10.866667 ~= 2.433).
    event_type, event_data = hass.bus.async_fire.mock_calls[1][1]
    assert event_type == EVENT_TIMER_OUT_OF_SYNC
    assert abs(event_data[ATTR_SECONDS] - 2.433333) < 0.001
    assert len(funcs) == 2
    fire_time_event, stop_timer = funcs
    assert len(hass.loop.call_later.mock_calls) == 2
    # Rescheduled against the latest monotonic reading (13.4 + 0.8).
    delay, callback, target = hass.loop.call_later.mock_calls[1][1]
    assert abs(delay - 0.8) < 0.001
    assert callback is fire_time_event
    assert abs(target - 14.2) < 0.001
@asyncio.coroutine
def test_hass_start_starts_the_timer(loop):
    """Test when hass starts, it starts the timer."""
    hass = ha.HomeAssistant(loop=loop)
    try:
        with patch('homeassistant.core._async_create_timer') as mock_timer:
            yield from hass.async_start()
        assert hass.state == ha.CoreState.running
        # Task tracking is switched off once startup completes.
        assert not hass._track_task
        # The timer is created exactly once, with hass itself as argument.
        assert len(mock_timer.mock_calls) == 1
        assert mock_timer.mock_calls[0][1][0] is hass
    finally:
        yield from hass.async_stop()
        assert hass.state == ha.CoreState.not_running
@asyncio.coroutine
def test_start_taking_too_long(loop, caplog):
    """Test when async_start takes too long."""
    hass = ha.HomeAssistant(loop=loop)
    caplog.set_level(logging.WARNING)
    try:
        # Force the startup timeout to fire: hass must still reach the
        # running state and only log a warning.
        with patch('homeassistant.core.timeout',
                   side_effect=asyncio.TimeoutError), \
                patch('homeassistant.core._async_create_timer') as mock_timer:
            yield from hass.async_start()
        assert hass.state == ha.CoreState.running
        assert len(mock_timer.mock_calls) == 1
        assert mock_timer.mock_calls[0][1][0] is hass
        assert 'Something is blocking Home Assistant' in caplog.text
    finally:
        yield from hass.async_stop()
        assert hass.state == ha.CoreState.not_running
@asyncio.coroutine
def test_track_task_functions(loop):
    """Test function to start/stop track task and initial state."""
    hass = ha.HomeAssistant(loop=loop)
    try:
        # Tracking is on by default for a fresh instance.
        assert hass._track_task
        hass.async_stop_track_tasks()
        assert not hass._track_task
        hass.async_track_tasks()
        assert hass._track_task
    finally:
        yield from hass.async_stop()
async def test_service_executed_with_subservices(hass):
    """Test we block correctly till all services done."""
    calls = async_mock_service(hass, 'test', 'inner')
    async def handle_outer(call):
        """Handle outer service call."""
        calls.append(call)
        # Fire two nested (inner) service calls sharing the outer context
        # and wait for both before the outer call is considered done.
        call1 = hass.services.async_call('test', 'inner', blocking=True,
                                         context=call.context)
        call2 = hass.services.async_call('test', 'inner', blocking=True,
                                         context=call.context)
        await asyncio.wait([call1, call2])
        calls.append(call)
    hass.services.async_register('test', 'outer', handle_outer)
    await hass.services.async_call('test', 'outer', blocking=True)
    # outer appended on entry and exit, inner twice in between.
    assert len(calls) == 4
    assert [call.service for call in calls] == [
        'outer', 'inner', 'inner', 'outer']
async def test_service_call_event_contains_original_data(hass):
    """Test that service call event contains original data."""
    events = []
    @ha.callback
    def callback(event):
        events.append(event)
    hass.bus.async_listen(EVENT_CALL_SERVICE, callback)
    # Schema coerces 'number' to int for the handler...
    calls = async_mock_service(hass, 'test', 'service', vol.Schema({
        'number': vol.Coerce(int)
    }))
    await hass.services.async_call('test', 'service', {
        'number': '23'
    }, blocking=True)
    await hass.async_block_till_done()
    # ...but the bus event must carry the caller's original (string) data.
    assert len(events) == 1
    assert events[0].data['service_data']['number'] == '23'
    assert len(calls) == 1
    assert calls[0].data['number'] == 23
|
|
"""
Input/Output tools for working with binary data.
The Stata input tools were originally written by Joe Presbrey as part of PyDTA.
You can find more information here http://presbrey.mit.edu/PyDTA
See Also
--------
numpy.lib.io
"""
import warnings
from statsmodels.compat.python import (lzip, lmap, lrange,
lfilter, asbytes, asstr)
from struct import unpack, calcsize, pack
from struct import error as struct_error
import datetime
import sys
import numpy as np
import statsmodels.tools.data as data_util
from pandas import isnull
from pandas.io.stata import StataMissingValue
from statsmodels.iolib.openfile import get_file_obj
_date_formats = ["%tc", "%tC", "%td", "%tw", "%tm", "%tq", "%th", "%ty"]
def _datetime_to_stata_elapsed(date, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : datetime.datetime
The date to convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
if not isinstance(date, datetime.datetime):
raise ValueError("date should be datetime.datetime format")
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
delta = date - stata_epoch
return (delta.days * 86400000 + delta.seconds*1000 +
delta.microseconds/1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.", UserWarning)
return date
elif fmt in ["%td", "td"]:
return (date- stata_epoch).days
elif fmt in ["%tw", "tw"]:
return (52*(date.year-stata_epoch.year) +
(date - datetime.datetime(date.year, 1, 1)).days / 7)
elif fmt in ["%tm", "tm"]:
return (12 * (date.year - stata_epoch.year) + date.month - 1)
elif fmt in ["%tq", "tq"]:
return 4*(date.year-stata_epoch.year) + int((date.month - 1)/3)
elif fmt in ["%th", "th"]:
return 2 * (date.year - stata_epoch.year) + int(date.month > 6)
elif fmt in ["%ty", "ty"]:
return date.year
else:
raise ValueError("fmt %s not understood" % fmt)
def _stata_elapsed_date_to_datetime(date, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : int
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Examples
--------
>>> _stata_elapsed_date_to_datetime(52, "%tw") datetime.datetime(1961, 1, 1, 0, 0)
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
If you do not have pandas with datetime support, then you cannot do
milliseconds accurately.
"""
#NOTE: we could run into overflow / loss of precision situations here
# casting to int, but I'm not sure what to do. datetime will not deal with
# numpy types and numpy datetime is not mature enough / we cannot rely on
# pandas version > 0.7.1
#TODO: IIRC relative delta does not play well with np.datetime?
date = int(date)
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
from dateutil.relativedelta import relativedelta
return stata_epoch + relativedelta(microseconds=date*1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.",
UserWarning)
return date
elif fmt in ["%td", "td"]:
return stata_epoch + datetime.timedelta(int(date))
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = datetime.datetime(stata_epoch.year + date // 52, 1, 1)
day_delta = (date % 52 ) * 7
return year + datetime.timedelta(int(day_delta))
elif fmt in ["%tm", "tm"]:
year = stata_epoch.year + date // 12
month_delta = (date % 12 ) + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%tq", "tq"]:
year = stata_epoch.year + date // 4
month_delta = (date % 4) * 3 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%th", "th"]:
year = stata_epoch.year + date // 2
month_delta = (date % 2) * 6 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%ty", "ty"]:
if date > 0:
return datetime.datetime(date, 1, 1)
else: # do not do negative years bc cannot mix dtypes in column
raise ValueError("Year 0 and before not implemented")
else:
raise ValueError("Date fmt %s not understood" % fmt)
### Helper classes for StataReader ###
class _StataVariable(object):
"""
A dataset variable. Not intended for public use.
Parameters
----------
variable_data
Attributes
----------
format : str
Stata variable format. See notes for more information.
index : int
Zero-index column index of variable.
label : str
Data Label
name : str
Variable name
type : str
Stata data type. See notes for more information.
value_format : str
Value format.
Notes
-----
More information: http://www.stata.com/help.cgi?format
"""
def __init__(self, variable_data):
self._data = variable_data
def __int__(self):
"""the variable's index within an observation"""
return self.index
def __str__(self):
"""the name of the variable"""
return self.name
@property
def index(self):
"""the variable's index within an observation"""
return self._data[0]
@property
def type(self):
"""
The data type of variable
Possible types are:
{1..244:string, b:byte, h:int, l:long, f:float, d:double)
"""
return self._data[1]
@property
def name(self):
"""the name of the variable"""
return self._data[2]
@property
def format(self):
"""the variable's Stata format"""
return self._data[4]
@property
def value_format(self):
"""the variable's value format"""
return self._data[5]
@property
def label(self):
"""The variable's label"""
return self._data[6]
class StataReader(object):
"""
Stata .dta file reader.
.. deprecated:: 0.11
Use pandas.read_stata or pandas.io.stata.StataReader
Provides methods to return the metadata of a Stata .dta file and
a generator for the data itself.
Parameters
----------
file : file-like
A file-like object representing a Stata .dta file.
missing_values : bool
If missing_values is True, parse missing_values and return a
Missing Values object instead of None.
encoding : str, optional
Used for Python 3 only. Encoding to use when reading the .dta file.
Defaults to `locale.getpreferredencoding`
See Also
--------
statsmodels.iolib.foreign.genfromdta
pandas.read_stata
pandas.io.stata.StataReader
Notes
-----
This is known only to work on file formats 113 (Stata 8/9), 114
(Stata 10/11), and 115 (Stata 12). Needs to be tested on older versions.
Known not to work on format 104, 108. If you have the documentation for
older formats, please contact the developers.
For more information about the .dta format see
http://www.stata.com/help.cgi?dta
http://www.stata.com/help.cgi?dta_113
"""
_header = {}
_data_location = 0
_col_sizes = ()
_has_string_data = False
_missing_values = False
#type code
#--------------------
#str1 1 = 0x01
#str2 2 = 0x02
#...
#str244 244 = 0xf4
#byte 251 = 0xfb (sic)
#int 252 = 0xfc
#long 253 = 0xfd
#float 254 = 0xfe
#double 255 = 0xff
#--------------------
#NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
DTYPE_MAP = dict(lzip(lrange(1,245), ['a' + str(i) for i in range(1,245)]) + \
[(251, np.int16),(252, np.int32),(253, int),
(254, np.float32), (255, np.float64)])
TYPE_MAP = lrange(251)+list('bhlfd')
#NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
MISSING_VALUES = { 'b': (-127,100), 'h': (-32767, 32740), 'l':
(-2147483647, 2147483620), 'f': (-1.701e+38, +1.701e+38), 'd':
(-1.798e+308, +8.988e+307) }
def __init__(self, fname, missing_values=False, encoding=None):
    """Parse the header of *fname* and prepare the reader.

    Emits a FutureWarning: this class is deprecated in favor of
    pandas.read_stata / pandas.io.stata.StataReader.
    """
    warnings.warn(
        "StataReader is deprecated as of 0.10.0 and will be removed after"
        " the 0.12 release. Use pandas.read_stata or "
        "pandas.io.stata.StataReader instead.",
        FutureWarning)
    # Default to the locale's preferred encoding (used when decoding
    # header strings on Python 3).
    if encoding is None:
        import locale
        self._encoding = locale.getpreferredencoding()
    else:
        self._encoding = encoding
    self._missing_values = missing_values
    self._parse_header(fname)
def file_headers(self):
    """
    Returns all .dta file headers.

    Returns
    -------
    out : dict
        Has keys typlist, data_label, lbllist, varlist, nvar, filetype,
        ds_format, nobs, fmtlist, vlblist, time_stamp, srtlist, byteorder
        (plus dtyplist), as populated by _parse_header.
    """
    return self._header
def file_format(self):
    """
    Returns the file format.

    Returns
    -------
    out : int

    Notes
    -----
    Format 113: Stata 8/9
    Format 114: Stata 10/11
    Format 115: Stata 12
    """
    return self._header['ds_format']
def file_label(self):
    """
    Returns the dataset's label (the 81-byte data_label header field).

    Returns
    -------
    out : str
    """
    return self._header['data_label']
def file_timestamp(self):
    """
    Returns the date and time Stata recorded on last file save.

    Returns
    -------
    out : str
    """
    return self._header['time_stamp']
def variables(self):
    """
    Returns a list of the dataset's _StataVariable objects, one per
    column, built from the parallel header lists (index, type, name,
    sort order, format, label name, variable label).
    """
    return lmap(_StataVariable, zip(lrange(self._header['nvar']),
                self._header['typlist'], self._header['varlist'],
                self._header['srtlist'],
                self._header['fmtlist'], self._header['lbllist'],
                self._header['vlblist']))
def dataset(self, as_dict=False):
    """
    Returns a Python generator object for iterating over the dataset.

    Parameters
    ----------
    as_dict : bool, optional
        If as_dict is True, yield each row of observations as a dict.
        If False, yields each row of observations as a list.

    Returns
    -------
    Generator object for iterating over the dataset.  Yields each row of
    observations as a list by default.

    Notes
    -----
    If missing_values is True during instantiation of StataReader then
    observations with StataMissingValue(s) are not filtered and should
    be handled by your application.
    """
    # Rewind to the start of the data section; best-effort, as some
    # file-like objects may not support seek.
    try:
        self._file.seek(self._data_location)
    except Exception:
        pass

    if as_dict:
        # NOTE: `vars` shadows the builtin; kept for byte-compatibility.
        vars = lmap(str, self.variables())
        for i in range(len(self)):
            yield dict(zip(vars, self._next()))
    else:
        for i in range(self._header['nobs']):
            yield self._next()
### Python special methods
def __len__(self):
    """
    Return the number of observations in the dataset.

    This value is taken directly from the header and includes observations
    with missing values.
    """
    return self._header['nobs']
def __getitem__(self, k):
    """
    Seek to an observation indexed k in the file and return it, ordered
    by Stata's output to the .dta file.

    k is zero-indexed.  Prefer using R.data() for performance.
    """
    if not (isinstance(k, int)) or k < 0 or k > len(self)-1:
        raise IndexError(k)
    # Each record is fixed-width: sum of all column sizes.
    loc = self._data_location + sum(self._col_size()) * k
    if self._file.tell() != loc:
        self._file.seek(loc)
    return self._next()
# Private methods
def _null_terminate(self, s, encoding):
    """Trim at the first NUL byte and decode the bytes to str.

    NOTE(review): the slice endpoint uses ``s.index`` on the *original*
    bytes while the start comes from ``lstrip`` -- the two disagree if
    *s* has leading NULs; any exception (e.g. no NUL present) is
    swallowed and the raw bytes are decoded unchanged.  Confirm leading
    NULs cannot occur in practice before relying on this.
    """
    null_byte = asbytes('\x00')
    try:
        s = s.lstrip(null_byte)[:s.index(null_byte)]
    except Exception:
        pass
    return s.decode(encoding)
def _parse_header(self, file_object):
    """Read the .dta header, descriptors and expansion fields.

    Populates ``self._header`` and records where the data section starts
    (``self._data_location``) so observations can be read lazily.  Reads
    are strictly sequential; the field widths below follow the dta
    format-113/114/115 layout.
    """
    self._file = file_object
    encoding = self._encoding

    # parse headers
    self._header['ds_format'] = unpack('b', self._file.read(1))[0]
    if self._header['ds_format'] not in [113, 114, 115]:
        raise ValueError("Only file formats >= 113 (Stata >= 9)"
                         " are supported. Got format %s. Please report "
                         "if you think this error is incorrect." %
                         self._header['ds_format'])
    # 0x1 -> big-endian, anything else -> little-endian (py2-era and/or idiom)
    byteorder = self._header['byteorder'] = unpack('b',
            self._file.read(1))[0] == 0x1 and '>' or '<'
    self._header['filetype'] = unpack('b', self._file.read(1))[0]
    self._file.read(1)  # skip the unused padding byte
    nvar = self._header['nvar'] = unpack(byteorder+'h',
            self._file.read(2))[0]
    self._header['nobs'] = unpack(byteorder+'i', self._file.read(4))[0]
    self._header['data_label'] = self._null_terminate(self._file.read(81),
                                                      encoding)
    self._header['time_stamp'] = self._null_terminate(self._file.read(18),
                                                      encoding)

    # parse descriptors
    typlist = [ord(self._file.read(1)) for i in range(nvar)]
    self._header['typlist'] = [self.TYPE_MAP[typ] for typ in typlist]
    self._header['dtyplist'] = [self.DTYPE_MAP[typ] for typ in typlist]
    self._header['varlist'] = [self._null_terminate(self._file.read(33),
                               encoding) for i in range(nvar)]
    # srtlist has nvar+1 entries on disk; the trailing terminator is dropped
    self._header['srtlist'] = unpack(byteorder+('h'*(nvar+1)),
                                     self._file.read(2*(nvar+1)))[:-1]
    # format strings are 12 bytes in format 113, 49 bytes from 114 on
    if self._header['ds_format'] <= 113:
        self._header['fmtlist'] = \
            [self._null_terminate(self._file.read(12), encoding)
             for i in range(nvar)]
    else:
        self._header['fmtlist'] = \
            [self._null_terminate(self._file.read(49), encoding)
             for i in range(nvar)]
    self._header['lbllist'] = [self._null_terminate(self._file.read(33),
                               encoding) for i in range(nvar)]
    self._header['vlblist'] = [self._null_terminate(self._file.read(81),
                               encoding) for i in range(nvar)]

    # ignore expansion fields
    # When reading, read five bytes; the last four bytes now tell you the
    # size of the next read, which you discard.  You then continue like
    # this until you read 5 bytes of zeros.
    while True:
        data_type = unpack(byteorder+'b', self._file.read(1))[0]
        data_len = unpack(byteorder+'i', self._file.read(4))[0]
        if data_type == 0:
            break
        self._file.read(data_len)

    # other state vars
    self._data_location = self._file.tell()
    # int entries in typlist mark fixed-width string columns
    self._has_string_data = len(lfilter(lambda x: isinstance(x, int),
                                        self._header['typlist'])) > 0
    self._col_size()  # warm the column-size cache
def _calcsize(self, fmt):
    """Byte width of one column: an int *fmt* is a string width, a struct
    code is measured with calcsize.

    NOTE: the py2-era ``and/or`` idiom relies on an int *fmt* never being
    0 (string widths are 1-244 per the type map).
    """
    return isinstance(fmt, int) and fmt or \
        calcsize(self._header['byteorder']+fmt)
def _col_size(self, k=None):
    """Return the byte width of column *k*, or the list of all widths
    when *k* is None.

    Widths are derived lazily from the header's type list and cached in
    ``self._col_sizes``.
    """
    if not self._col_sizes:
        self._col_sizes = [self._calcsize(typ)
                           for typ in self._header['typlist']]
    return self._col_sizes if k is None else self._col_sizes[k]
def _unpack(self, fmt, byt):
    """Unpack one numeric value from *byt*, mapping Stata missing codes.

    A value outside the valid (nmin, nmax) range for *fmt* is a Stata
    missing value: returned as a StataMissingValue when the reader was
    created with ``missing_values=True``, otherwise as None.
    """
    d = unpack(self._header['byteorder']+fmt, byt)[0]
    if fmt[-1] in self.MISSING_VALUES:
        nmin, nmax = self.MISSING_VALUES[fmt[-1]]
        if d < nmin or d > nmax:
            if self._missing_values:
                return StataMissingValue(nmax, d)
            else:
                return None
    return d
def _next(self):
    """Read and decode the next observation (one row) from the file."""
    typlist = self._header['typlist']
    if self._has_string_data:
        # Mixed string/numeric row: an int type is a fixed string width,
        # anything else is a struct code handled by _unpack.
        data = [None]*self._header['nvar']
        for i in range(len(data)):
            if isinstance(typlist[i], int):
                data[i] = self._null_terminate(self._file.read(typlist[i]),
                                               self._encoding)
            else:
                data[i] = self._unpack(typlist[i],
                                       self._file.read(self._col_size(i)))
        return data
    else:
        # All-numeric fast path.
        return lmap(lambda i: self._unpack(typlist[i],
                    self._file.read(self._col_size(i))),
                    lrange(self._header['nvar']))
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _dtype_to_stata_type(dtype):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
251 - chr(251) - for int8 and int16, byte
252 - chr(252) - for int32, int
253 - chr(253) - for int64, long
254 - chr(254) - for float32, float
255 - chr(255) - double, double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
#TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_:
# try to coerce it to the biggest string
# not memory efficient, what else could we do?
return chr(244)
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int64:
return chr(253)
elif dtype == np.int32:
return chr(252)
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return chr(251)
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _dtype_to_default_stata_fmt(dtype):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
string -> "%DDs" where DD is the length of the string
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%9.0g"
int16 -> "%9.0g"
int8 -> "%8.0g"
"""
#TODO: expand this to handle a default datetime format?
if dtype.type == np.string_:
return "%" + str(dtype.itemsize) + "s"
elif dtype.type == np.object_:
return "%244s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int64:
return "%9.0g"
elif dtype == np.int32:
return "%8.0g"
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return "%8.0g"
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _pad_bytes(name, length):
"""
Takes a char string and pads it wih null bytes until it's length chars
"""
return name + "\x00" * (length - len(name))
def _default_names(nvar):
"""
Returns default Stata names v1, v2, ... vnvar
"""
return ["v%d" % i for i in range(1,nvar+1)]
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise ValueError("fmt %s not understood" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key) : convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convery_dates key is not in varlist "
"and is not an int")
new_dict.update({key : convert_dates[key]})
return new_dict
_type_converters = {253 : int, 252 : int}
class StataWriter(object):
    """
    A class for writing Stata binary dta files from array-like objects

    .. deprecated:: 0.11
       Use pandas.DataFrame.to_stata or pandas.io.stata.StataWriter

    Parameters
    ----------
    fname : file path or buffer
        Where to save the dta file.
    data : array_like
        Array-like input to save. Pandas objects are also accepted.
    convert_dates : dict
        Dictionary mapping column of datetime types to the stata internal
        format that you want to use for the dates. Options are
        'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
        number or a name.
    encoding : str
        Default is latin-1. Note that Stata does not support unicode.
    byteorder : str
        Can be ">", "<", "little", or "big". The default is None which uses
        `sys.byteorder`

    Returns
    -------
    writer : StataWriter instance
        The StataWriter instance has a write_file method, which will
        write the file to the given `fname`.

    Examples
    --------
    >>> writer = StataWriter('./data_file.dta', data)
    >>> writer.write_file()

    Or with dates

    >>> writer = StataWriter('./date_data_file.dta', date, {2 : 'tw'})
    >>> writer.write_file()
    """
    #type code
    #--------------------
    #str1        1 = 0x01
    #str2        2 = 0x02
    #...
    #str244    244 = 0xf4
    #byte      251 = 0xfb  (sic)
    #int       252 = 0xfc
    #long      253 = 0xfd
    #float     254 = 0xfe
    #double    255 = 0xff
    #--------------------
    #NOTE: the byte type seems to be reserved for categorical variables
    # with a label, but the underlying variable is -127 to 100
    # we're going to drop the label and cast to int
    DTYPE_MAP = dict(lzip(lrange(1, 245), ['a' + str(i) for i in range(1, 245)]) +
                     [(251, np.int16), (252, np.int32), (253, int),
                      (254, np.float32), (255, np.float64)])
    TYPE_MAP = lrange(251) + list('bhlfd')
    # Missing-value sentinel written for null numerics, keyed by struct code.
    MISSING_VALUES = {'b': 101,
                      'h': 32741,
                      'l': 2147483621,
                      'f': 1.7014118346046923e+38,
                      'd': 8.98846567431158e+307}

    def __init__(self, fname, data, convert_dates=None, encoding="latin-1",
                 byteorder=None):
        warnings.warn(
            "StataWriter is deprecated as of 0.10.0 and will be removed after"
            " the 0.12 release. Use pandas.DataFrame.to_stata or "
            "pandas.io.stata.StataWriter instead.",  # fixed typo: StatWriter
            FutureWarning)
        self._convert_dates = convert_dates
        # attach nobs, nvars, data, varlist, typlist
        if data_util._is_using_pandas(data, None):
            self._prepare_pandas(data)
        elif data_util._is_array_like(data, None):
            data = np.asarray(data)
            if data_util._is_structured_ndarray(data):
                self._prepare_structured_array(data)
            else:
                if convert_dates is not None:
                    raise ValueError("Not able to convert dates in a plain"
                                     " ndarray.")
                self._prepare_ndarray(data)
        else:  # pragma : no cover
            raise ValueError("Type %s for data not understood" % type(data))

        if byteorder is None:
            byteorder = sys.byteorder
        self._byteorder = _set_endianness(byteorder)
        self._encoding = encoding
        self._file = get_file_obj(fname, 'wb', encoding)

    def _write(self, to_write):
        """
        Helper to call asbytes before writing to file for Python 3 compat.
        """
        self._file.write(asbytes(to_write))

    def _prepare_structured_array(self, data):
        """Extract nobs/nvar/names/types/formats from a structured ndarray,
        retyping date columns when conversion was requested."""
        self.nobs = len(data)
        self.nvar = len(data.dtype)
        self.data = data
        self.datarows = iter(data)
        dtype = data.dtype
        descr = dtype.descr
        if dtype.names is None:
            varlist = _default_names(self.nvar)
        else:
            varlist = dtype.names

        # check for datetime and change the type
        convert_dates = self._convert_dates
        if convert_dates is not None:
            convert_dates = _maybe_convert_to_int_keys(convert_dates,
                                                       varlist)
            self._convert_dates = convert_dates
            for key in convert_dates:
                descr[key] = (
                    descr[key][0],
                    _convert_datetime_to_stata_type(convert_dates[key])
                )
            dtype = np.dtype(descr)

        self.varlist = varlist
        self.typlist = [_dtype_to_stata_type(dtype[i])
                        for i in range(self.nvar)]
        self.fmtlist = [_dtype_to_default_stata_fmt(dtype[i])
                        for i in range(self.nvar)]
        # set the given format for the datetime cols
        if convert_dates is not None:
            for key in convert_dates:
                self.fmtlist[key] = convert_dates[key]

    def _prepare_ndarray(self, data):
        """Extract metadata from a plain (homogeneous) ndarray; a 1-d
        input is treated as a single column."""
        if data.ndim == 1:
            data = data[:, None]
        self.nobs, self.nvar = data.shape
        self.data = data
        self.datarows = iter(data)
        #TODO: this should be user settable
        dtype = data.dtype
        self.varlist = _default_names(self.nvar)
        self.typlist = [_dtype_to_stata_type(dtype) for i in range(self.nvar)]
        self.fmtlist = [_dtype_to_default_stata_fmt(dtype)
                        for i in range(self.nvar)]

    def _prepare_pandas(self, data):
        """Extract metadata from a pandas DataFrame; the index is reset
        and written as an ordinary column."""
        #NOTE: we might need a different API / class for pandas objects so
        # we can set different semantics - handle this with a PR to pandas.io
        class DataFrameRowIter(object):
            def __init__(self, data):
                self.data = data

            def __iter__(self):
                for i, row in data.iterrows():
                    yield row

        data = data.reset_index()
        self.datarows = DataFrameRowIter(data)
        self.nobs, self.nvar = data.shape
        self.data = data
        self.varlist = data.columns.tolist()
        dtypes = data.dtypes
        convert_dates = self._convert_dates
        if convert_dates is not None:
            convert_dates = _maybe_convert_to_int_keys(convert_dates,
                                                       self.varlist)
            self._convert_dates = convert_dates
            for key in convert_dates:
                new_type = _convert_datetime_to_stata_type(convert_dates[key])
                dtypes[key] = np.dtype(new_type)
        self.typlist = [_dtype_to_stata_type(dt) for dt in dtypes]
        self.fmtlist = [_dtype_to_default_stata_fmt(dt) for dt in dtypes]
        # set the given format for the datetime cols
        if convert_dates is not None:
            for key in convert_dates:
                self.fmtlist[key] = convert_dates[key]

    def write_file(self):
        """Write header, descriptors, variable labels and the data rows."""
        self._write_header()
        self._write_descriptors()
        self._write_variable_labels()
        # write 5 zeros for expansion fields
        self._write(_pad_bytes("", 5))
        if self._convert_dates is None:
            self._write_data_nodates()
        else:
            self._write_data_dates()
        #self._write_value_labels()

    def _write_header(self, data_label=None, time_stamp=None):
        """Write the fixed-size format-114 header.

        Parameters
        ----------
        data_label : str, optional
            Dataset label, truncated to 80 characters.
        time_stamp : datetime.datetime, optional
            Save timestamp; defaults to now.
        """
        byteorder = self._byteorder
        # ds_format - just use 114
        self._write(pack("b", 114))
        # byteorder
        self._write(byteorder == ">" and "\x01" or "\x02")
        # filetype
        self._write("\x01")
        # unused
        self._write("\x00")
        # number of vars, 2 bytes
        self._write(pack(byteorder+"h", self.nvar)[:2])
        # number of obs, 4 bytes
        self._write(pack(byteorder+"i", self.nobs)[:4])
        # data label 81 bytes, char, null terminated
        if data_label is None:
            self._write(self._null_terminate(_pad_bytes("", 80),
                                             self._encoding))
        else:
            self._write(self._null_terminate(_pad_bytes(data_label[:80],
                                             80), self._encoding))
        # time stamp, 18 bytes, char, null terminated
        # format dd Mon yyyy hh:mm
        if time_stamp is None:
            time_stamp = datetime.datetime.now()
        elif not isinstance(time_stamp, datetime.datetime):
            # BUG FIX: previously checked isinstance against the `datetime`
            # *module*, which raises TypeError; check the class instead.
            raise ValueError("time_stamp should be datetime type")
        self._write(self._null_terminate(
            time_stamp.strftime("%d %b %Y %H:%M"),
            self._encoding))

    def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
                           fmtlist=None, lbllist=None):
        """Write the typlist/varlist/srtlist/fmtlist/lbllist sections."""
        nvar = self.nvar
        # typlist, length nvar, format byte array
        for typ in self.typlist:
            self._write(typ)
        # varlist, length 33*nvar, char array, null terminated
        for name in self.varlist:
            name = self._null_terminate(name, self._encoding)
            name = _pad_bytes(asstr(name[:32]), 33)
            self._write(name)
        # srtlist, 2*(nvar+1), int array, encoded by byteorder
        srtlist = _pad_bytes("", (2*(nvar+1)))
        self._write(srtlist)
        # fmtlist, 49*nvar, char array
        for fmt in self.fmtlist:
            self._write(_pad_bytes(fmt, 49))
        # lbllist, 33*nvar, char array
        #NOTE: this is where you could get fancy with pandas categorical type
        for i in range(nvar):
            self._write(_pad_bytes("", 33))

    def _write_variable_labels(self, labels=None):
        """Write (empty) 81-byte variable labels; custom labels are not
        currently supported."""
        nvar = self.nvar
        if labels is None:
            for i in range(nvar):
                self._write(_pad_bytes("", 81))

    def _write_data_nodates(self):
        """Write all rows assuming no date conversions are needed."""
        data = self.datarows
        byteorder = self._byteorder
        TYPE_MAP = self.TYPE_MAP
        typlist = self.typlist
        for row in data:
            #row = row.squeeze().tolist() # needed for structured arrays
            for i, var in enumerate(row):
                typ = ord(typlist[i])
                if typ <= 244:  # we've got a string
                    if len(var) < typ:
                        var = _pad_bytes(asstr(var), len(var) + 1)
                    self._write(var)
                else:
                    try:
                        if typ in _type_converters:
                            var = _type_converters[typ](var)
                        self._write(pack(byteorder+TYPE_MAP[typ], var))
                    except struct_error:
                        # have to be strict about type pack will not do any
                        # kind of casting
                        self._write(pack(byteorder+TYPE_MAP[typ],
                                    _type_converters[typ](var)))

    def _write_data_dates(self):
        """Write all rows, converting flagged columns to Stata elapsed
        dates and mapping nulls to Stata missing-value sentinels."""
        convert_dates = self._convert_dates
        data = self.datarows
        byteorder = self._byteorder
        TYPE_MAP = self.TYPE_MAP
        MISSING_VALUES = self.MISSING_VALUES
        typlist = self.typlist
        for row in data:
            #row = row.squeeze().tolist() # needed for structured arrays
            for i, var in enumerate(row):
                typ = ord(typlist[i])
                #NOTE: If anyone finds this terribly slow, there is
                # a vectorized way to convert dates, see genfromdta for going
                # from int to datetime and reverse it. will copy data though
                if i in convert_dates:
                    var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
                if typ <= 244:  # we've got a string
                    if isnull(var):
                        var = ""  # missing string
                    if len(var) < typ:
                        var = _pad_bytes(var, len(var) + 1)
                    self._write(var)
                else:
                    if isnull(var):  # this only matters for floats
                        # BUG FIX: MISSING_VALUES is keyed by struct code
                        # ('b','h','l','f','d'), not by the raw type byte;
                        # indexing with the int `typ` raised KeyError.
                        var = MISSING_VALUES[TYPE_MAP[typ]]
                    self._write(pack(byteorder+TYPE_MAP[typ], var))

    def _null_terminate(self, s, encoding):
        """Append a NUL terminator and encode *s* to bytes."""
        null_byte = '\x00'
        s += null_byte
        return s.encode(encoding)
def genfromdta(fname, missing_flt=-999., encoding=None, pandas=False,
               convert_dates=True):
    """
    Returns an ndarray or DataFrame from a Stata .dta file.

    .. deprecated:: 0.11
       Use pandas.read_stata or pandas.io.stata.StataReader

    Parameters
    ----------
    fname : str or filehandle
        Stata .dta file.
    missing_flt : numeric
        The numeric value to replace missing values with. Will be used for
        any numeric value.
    encoding : str, optional
        Used for Python 3 only. Encoding to use when reading the .dta file.
        Defaults to `locale.getpreferredencoding`
    pandas : bool
        Optionally return a DataFrame instead of an ndarray
    convert_dates : bool
        If convert_dates is True, then Stata formatted dates will be converted
        to datetime types according to the variable's format.
    """
    warnings.warn(
        "genfromdta is deprecated as of 0.10.0 and will be removed after the "
        "0.12 release future version. Use pandas.read_stata instead.",
        FutureWarning)
    if isinstance(fname, str):
        fhd = StataReader(open(fname, 'rb'), missing_values=False,
                          encoding=encoding)
    elif not hasattr(fname, 'read'):
        raise TypeError("The input should be a string or a filehandle. "
                        "(got %s instead)" % type(fname))
    else:
        fhd = StataReader(fname, missing_values=False, encoding=encoding)
#    validate_names = np.lib._iotools.NameValidator(excludelist=excludelist,
#                                    deletechars=deletechars,
#                                    case_sensitive=case_sensitive)

    #TODO: This needs to handle the byteorder?
    header = fhd.file_headers()
    types = header['dtyplist']
    nobs = header['nobs']
    varnames = header['varlist']
    fmtlist = header['fmtlist']
    labels = header['vlblist']  # labels are thrown away unless DataArray
                                # type is used
    stata_dta = fhd.dataset()

    # Build the structured output array directly.  (A redundant float
    # pre-allocation that was immediately overwritten was removed here.)
    dt = np.dtype(lzip(varnames, types))
    data = np.zeros((nobs), dtype=dt)  # init final array
    for rownum, line in enumerate(stata_dta):
        # does not handle missing value objects, just casts
        # None will only work without missing value object.
        if None in line:
            for i, val in enumerate(line):
                #NOTE: This will only be scalar types because missing strings
                # are empty not None in Stata
                if val is None:
                    line[i] = missing_flt
        data[rownum] = tuple(line)

    if pandas:
        from pandas import DataFrame
        data = DataFrame.from_records(data)
        if convert_dates:
            cols = np.where(lmap(lambda x: x in _date_formats, fmtlist))[0]
            for col in cols:
                i = col
                col = data.columns[col]
                data[col] = data[col].apply(_stata_elapsed_date_to_datetime,
                                            args=(fmtlist[i],))
    elif convert_dates:
        #date_cols = np.where(map(lambda x : x in _date_formats,
        #                                                    fmtlist))[0]
        # make the dtype for the datetime types
        cols = np.where(lmap(lambda x: x in _date_formats, fmtlist))[0]
        dtype = data.dtype.descr
        dtype = [(sub_dtype[0], object) if i in cols else sub_dtype
                 for i, sub_dtype in enumerate(dtype)]
        data = data.astype(dtype)  # have to copy
        for col in cols:
            def convert(x):
                return _stata_elapsed_date_to_datetime(x, fmtlist[col])
            data[data.dtype.names[col]] = lmap(convert,
                                               data[data.dtype.names[col]])
    return data
def savetxt(fname, X, names=None, fmt='%.18e', delimiter=' '):
    """
    Save an array to a text file.

    This is just a copy of numpy.savetxt patched to support structured arrays
    or a header of names.  Does not include py3 support now in savetxt.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format.  `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    names : list, optional
        If given names will be the column header in the text file.  If None
        and X is a structured or recarray then the names are taken from
        X.dtype.names.
    fmt : str or sequence of strs
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored.
    delimiter : str
        Character separating columns.

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into a ``.npz`` compressed archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to preceed result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : str of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.

    Examples
    --------
    >>> savetxt('test.out', x, delimiter=',')   # x is an array
    >>> savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
    """
    with get_file_obj(fname, 'w') as fh:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]

        # `fmt` can be a string with multiple insertion points or a list of formats.
        # E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        # NOTE(review): `format` shadows the builtin; kept unchanged here.
        if isinstance(fmt, (list, tuple)):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = delimiter.join(fmt)
        elif isinstance(fmt, str):
            # a single '%' means one column format, replicated for all columns
            if fmt.count('%') == 1:
                fmt = [fmt, ]*ncol
                format = delimiter.join(fmt)
            elif fmt.count('%') != ncol:
                raise AttributeError('fmt has wrong number of %% formats. %s'
                                     % fmt)
            else:
                format = fmt

        # handle names
        if names is None and X.dtype.names:
            names = X.dtype.names
        if names is not None:
            fh.write(delimiter.join(names) + '\n')

        for row in X:
            fh.write(format % tuple(row) + '\n')
|
|
import unittest
import numpy as np
import io
import inspect
from unittest.mock import Mock, patch, call
from io import StringIO
import os
from vasppy.summary import (Summary, md5sum, potcar_spec, find_vasp_calculations,
load_vasp_summary)
from vasppy.vaspmeta import VASPMeta
from pymatgen.io.vasp.outputs import Vasprun
# Directory holding on-disk fixtures used by these tests.
test_data_dir = 'test_data'

# Fake POTCAR contents: three pseudopotential entries, each terminated by
# the "End of Dataset" sentinel that potcar_spec() splits on.
mock_potcar_string = """foo
End of Dataset
bar
End of Dataset
sds
End of Dataset
"""

# Fake md5 lookup table: potential-set name -> {entry label: md5 digest},
# patched over vasppy.data.potcar_data.potcar_md5sum_data in the tests.
mock_potcar_data = { 'PBE': { 'A': '12',
                              'B': '34' },
                     'PBE_52': { 'C': '01',
                                 'D': '23' },
                     'PBE_54': { 'E': '56',
                                 'F': '78' },
                     'LDA': { 'G': '89' },
                     'LDA_52': { 'H': '101' },
                     'LDA_54': { 'I': '202' },
                     'GGA': { 'J': '303' },
                     'USPP_GGA': { 'K': '404' },
                     'USPP_LDA': { 'L': '505' },
                     'PBE_54r': { 'M': '123' },
                     'LDA_54r': { 'N': '456' } }
class SummaryInitTestCase( unittest.TestCase ):
    """Tests for construction of vasppy.summary.Summary."""

    @patch('vasppy.summary.VASPMeta')
    @patch('vasppy.summary.Summary.parse_vasprun')
    def test_summary_is_initialised( self, mock_parse_vasprun, MockVASPMeta ):
        # VASPMeta.from_file is stubbed out so no metadata file is needed.
        MockVASPMeta.from_file = Mock( return_value='foo' )
        summary = Summary()
        self.assertEqual( mock_parse_vasprun.call_count, 1 )
        # Every expected printer must be registered and bound as a method.
        expected_print_methods = [ 'title', 'type', 'status', 'stoichiometry',
                                   'potcar', 'eatom', 'energy', 'k-points',
                                   'functional', 'encut', 'plus_u', 'ediffg',
                                   'ibrion', 'converged', 'version', 'md5',
                                   'directory', 'lreal', 'vbm', 'cbm' ]
        for key in expected_print_methods:
            self.assertTrue(key in summary.print_methods)
            self.assertTrue( inspect.ismethod( summary.print_methods[ key ] ) )

    @patch('vasppy.summary.VASPMeta')
    @patch('vasppy.summary.Summary.parse_vasprun')
    def test_summary_init_raises_filenotfounderror_if_file_is_not_found( self, mock_parse_vasprun, MockVASPMeta ):
        # A missing metadata file must propagate as FileNotFoundError.
        MockVASPMeta.from_file = Mock( side_effect=FileNotFoundError )
        with self.assertRaises( FileNotFoundError ):
            summary = Summary()
class SummaryTestCase( unittest.TestCase ):
    """Tests for Summary behavior on an already-constructed instance.

    setUp replaces the parsed vasprun and metadata with mocks, so each
    test only has to set the attributes it reads.
    """

    @patch('vasppy.summary.VASPMeta')
    @patch('vasppy.summary.Summary.parse_vasprun')
    def setUp( self, mock_parse_vaspun, MockVASPMeta ):
        MockVASPMeta.from_file = Mock( return_value='foo' )
        self.summary = Summary()
        self.summary.vasprun = Mock( spec=Vasprun )
        self.summary.meta = Mock( spec=VASPMeta )
        self.summary.meta.notes = None

    def test_functional_not_PBE( self ):
        self.summary.potcars_are_pbe = Mock( return_value=False )
        self.assertEqual( self.summary.functional, 'not recognised' )

    def test_functional_is_PBE( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'PE' }
        self.assertEqual( self.summary.functional, 'PBE' )

    def test_functional_is_PBEsol( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'PS' }
        self.assertEqual( self.summary.functional, 'PBEsol' )

    def test_functional_is_PW91( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': '91' }
        self.assertEqual( self.summary.functional, 'PW91' )

    def test_functional_is_rPBE( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'RP' }
        self.assertEqual( self.summary.functional, 'rPBE' )

    def test_functional_is_AM05( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'AM' }
        self.assertEqual( self.summary.functional, 'AM05' )

    def test_functional_is_PBE0( self ):
        # LHFCALC + AEXX=0.25 (no screening) identifies PBE0.
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'AM', 'LHFCALC': 'True', 'AEXX': '0.25' }
        self.assertEqual( self.summary.functional, 'PBE0' )

    def test_functional_is_HSE06( self ):
        # Adding HFSCREEN=0.2 to the PBE0 parameters identifies HSE06.
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'AM', 'LHFCALC': 'True', 'AEXX': '0.25', 'HFSCREEN': '0.2' }
        self.assertEqual( self.summary.functional, 'HSE06' )

    def test_functional_is_a_PBE_hybrid( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'AM', 'LHFCALC': 'True', 'AEXX': '0.19' }
        self.assertEqual( self.summary.functional, 'hybrid. alpha=0.19' )

    def test_functional_is_a_screened_PBE_hybrid( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'AM', 'LHFCALC': 'True', 'AEXX': '0.19', 'HFSCREEN': '0.34' }
        self.assertEqual( self.summary.functional, 'screened hybrid. alpha=0.19, mu=0.34' )

    def test_functional_raises_KeyError_if_PBE_tag_is_invalid( self ):
        self.summary.potcars_are_pbe = Mock( return_value=True )
        self.summary.vasprun.parameters = { 'GGA': 'foo' }
        with self.assertRaises( KeyError ):
            self.summary.functional

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_cbm( self, mock_stdout ):
        # eigenvalue_band_properties -> (gap, cbm, vbm, ...); index 1 is cbm
        summary = self.summary
        summary.vasprun.eigenvalue_band_properties = [ 'null', 'CBM', 'VBM' ]
        summary.print_cbm()
        self.assertEqual( mock_stdout.getvalue(), 'cbm: CBM\n' )

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_vbm( self, mock_stdout ):
        self.summary.vasprun.eigenvalue_band_properties = [ 'null', 'CBM', 'VBM' ]
        self.summary.print_vbm()
        self.assertEqual( mock_stdout.getvalue(), 'vbm: VBM\n' )

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_converged( self, mock_stdout ):
        self.summary.vasprun.converged = 'conv'
        self.summary.print_converged()
        self.assertEqual( mock_stdout.getvalue(), 'converged: conv\n' )

    def test_potcars_are_pbe_if_true( self ):
        self.summary.vasprun.potcar_symbols = [ 'PAW_PBE Fe_pv 06Sep2000', 'PAW_PBE O 08Apr2002' ]
        self.assertTrue( self.summary.potcars_are_pbe() )

    def test_potcars_are_pbe_if_false( self ):
        self.summary.vasprun.potcar_symbols = [ 'foo', 'PAW_PBE O 08Apr2002' ]
        self.assertFalse( self.summary.potcars_are_pbe() )

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_type( self, mock_stdout ):
        self.summary.meta.type = 'TYPE'
        self.summary.print_type()
        self.assertEqual( mock_stdout.getvalue(), 'type: TYPE\n' )

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_type_if_type_is_not_set( self, mock_stdout ):
        # A missing type should print nothing at all.
        self.summary.meta.type = None
        self.summary.print_type()
        self.assertEqual( mock_stdout.getvalue(), '' )

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_title( self, mock_stdout ):
        self.summary.meta.title = 'TITLE'
        self.summary.print_title()
        self.assertEqual( mock_stdout.getvalue(), 'title: TITLE\n' )

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_notes( self, mock_stdout ):
        self.summary.meta.notes = 'NOTES'
        self.summary.print_notes()
        self.assertEqual( mock_stdout.getvalue(), 'notes: NOTES\n' )

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_notes_handles_empty_notes_attribute( self, mock_stdout ):
        # notes is None (set in setUp); expect the YAML null marker '~'.
        self.summary.print_notes()
        self.assertEqual( mock_stdout.getvalue(), 'notes: ~\n' )
class SummaryHelperFunctionsTestCase( unittest.TestCase ):
def test_md5sum( self ):
    # md5 of "hello\n" is a fixed, well-known digest.
    self.assertEqual( md5sum('hello\n'), 'b1946ac92492d2347c6235b4d2611184' )
def test_potcar_spec(self):
mock_potcar_filename = 'POTCAR'
md5sum_return_values = ('12', '56', '23')
with patch('builtins.open', return_value=io.StringIO(mock_potcar_string)) as mock_open:
with patch('vasppy.summary.md5sum', side_effect=md5sum_return_values) as mock_md5sum:
with patch.dict('vasppy.data.potcar_data.potcar_md5sum_data', mock_potcar_data, clear=True):
p_spec = potcar_spec(mock_potcar_filename)
mock_open.assert_called_with(mock_potcar_filename, 'r')
mock_md5sum.assert_has_calls([ call('foo\nEnd of Dataset\n'),
call('bar\nEnd of Dataset\n'),
call('sds\nEnd of Dataset\n')])
self.assertEqual( p_spec, {'A': 'PBE', 'E': 'PBE_54', 'D': 'PBE_52'} )
def test_potcar_spec_returns_hashes(self):
mock_potcar_filename = 'POTCAR'
md5sum_return_values = ('12', '56', '23')
with patch('builtins.open', return_value=io.StringIO(mock_potcar_string)) as mock_open:
with patch('vasppy.summary.md5sum', side_effect=md5sum_return_values) as mock_md5sum:
with patch.dict('vasppy.data.potcar_data.potcar_md5sum_data', mock_potcar_data, clear=True):
p_spec = potcar_spec(mock_potcar_filename, return_hashes=True)
mock_open.assert_called_with(mock_potcar_filename, 'r')
mock_md5sum.assert_has_calls([ call('foo\nEnd of Dataset\n'),
call('bar\nEnd of Dataset\n'),
call('sds\nEnd of Dataset\n')])
self.assertEqual(p_spec, {'A': '12', 'E': '56', 'D': '23'})
def test_potcar_spec_raises_valueerror_if_md5sum_not_matched( self ):
mock_potcar_filename = 'POTCAR'
md5sum_return_values = ( '12', '56', '90' )
with patch('builtins.open', return_value=io.StringIO(mock_potcar_string)) as mock_open:
with patch('vasppy.summary.md5sum', side_effect=md5sum_return_values ) as mock_md5sum:
with patch.dict('vasppy.data.potcar_data.potcar_md5sum_data', mock_potcar_data, clear=True ):
with self.assertRaises( ValueError ):
potcar_spec( mock_potcar_filename )
def test_find_vasp_calculations( self ):
mock_glob_output = [ 'dir_A/vasprun.xml', 'dir_B/dir_C/vasprun.xml' ]
with patch('glob.iglob', side_effect=[mock_glob_output, []]) as mock_glob:
v = find_vasp_calculations()
self.assertEqual( v, [ './dir_A/', './dir_B/dir_C/' ] )
def test_load_vasp_summary( self ):
vasp_summary_test_filename = os.path.join( os.path.dirname( __file__ ), test_data_dir, 'vasp_summary_test.yaml' )
expected_dict = { 'foo': { 'title': 'foo', 'data': 'foo_data' },
'bar': { 'title': 'bar', 'data': 'bar_data' } }
vasp_summary = load_vasp_summary( vasp_summary_test_filename )
self.assertEqual( vasp_summary, expected_dict )
# run the test suite when this module is executed directly
if __name__ == '__main__':
    unittest.main()
|
|
#
# Copyright 2012 ibiblio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from terasaur.db import torrent_db
from terasaur.mixin.timestamp import TimestampMixin
from terasaur.torrent.util import is_valid_info_hash
class TorrentException(Exception): pass
class Torrent(TimestampMixin):
    __slots__ = ('_id', 'info_hash', 'created', 'updated', 'published', 'peers', 'seeds', 'completed', 'seedbanks')
    # NOTE: the string below is not the class docstring (it follows
    # __slots__, so it is a discarded expression); kept as field notes.
    """
    pymongo.ObjectId _id (mongodb oid)
    string info_hash (unique)
    datetime created
    datetime updated
    datetime published
    int peers
    int seeds
    int completed
    list seedbanks -- list of seedbank id integers
    """
    def __init__(self):
        # new, unsaved torrent: ids/dates unset, counters start at zero
        self._id = None
        self.info_hash = None
        self.created = None
        self.updated = None
        self.published = None
        self.peers = 0
        self.seeds = 0
        self.completed = 0
        self.seedbanks = None
    @staticmethod
    def find(**kwargs):
        """
        Find single:
        - Query by info hash and return Torrent object (or None)
        Find multiple:
        - Return list of Torrent objects matching kwargs['query']
        """
        info_hash = kwargs.get('info_hash', None)
        if info_hash is not None:
            return Torrent._find_single(info_hash)
        else:
            return Torrent._find_multiple(kwargs.get('query', None))
    @staticmethod
    def _find_single(info_hash, torrent_root=None):
        # Return the Torrent for info_hash, or None when not found.
        # NOTE(review): torrent_root is unused here -- confirm callers.
        Torrent._validate_info_hash(info_hash)
        data = torrent_db.get(info_hash)
        return Torrent._data_to_torrent(data)
    @staticmethod
    def _find_multiple(query=None):
        # Return a list of Torrents for a mongo-style query dict (or all).
        data_list = torrent_db.find(query)
        return Torrent._gen_find_results(data_list)
    @staticmethod
    def _gen_find_results(data_list):
        # Convert an iterable of raw db documents into Torrent objects.
        torrent_list = []
        for data in data_list:
            t = Torrent._data_to_torrent(data)
            torrent_list.append(t)
        return torrent_list
    @staticmethod
    def _data_to_torrent(data):
        # Hydrate a Torrent from a db document; returns None for falsy input.
        if data:
            t = Torrent()
            t._id = data['_id']
            t.info_hash = data['info_hash']
            t.created = data['created']
            t.updated = data['updated']
            t.published = data['published']
            t.peers = data['peers']
            t.seeds = data['seeds']
            t.completed = data['completed']
            t.seedbanks = data['seedbanks']
        else:
            t = None
        return t
    def save(self, override_updated=False):
        # Validate and persist; timestamps chosen by _get_save_dict are
        # copied back onto this object after the save.
        self.validate()
        save_dict = self._get_save_dict(override_updated)
        torrent_db.save(save_dict)
        if not self.created:
            self.created = save_dict['created']
        self.updated = save_dict['updated']
    def validate(self):
        # Raises TorrentException when the info hash is missing/malformed.
        self._validate_info_hash(self.info_hash)
    @staticmethod
    def _validate_info_hash(info_hash):
        if not info_hash:
            raise TorrentException('Missing info hash')
        if not is_valid_info_hash(info_hash):
            raise TorrentException('Invalid info hash')
    def _get_save_dict(self, override_updated=False):
        # Build the document to persist.  'updated' is refreshed to now
        # unless override_updated is True and a value already exists.
        created_date = self.created if self.created else self._get_now()
        if not override_updated or self.updated is None:
            self.updated = self._get_now()
        published = self.published if self.published else self._get_now()
        save_dict = {
            'info_hash': self.info_hash,
            'created': created_date,
            'updated': self.updated,
            'published': published,
            'peers': long(self.peers),
            'seeds': long(self.seeds),
            'completed': long(self.completed)
        }
        if self._id:
            # existing document: don't save the seedbank list here --
            # it is managed separately via add/remove_seedbank.
            save_dict['_id'] = self._id
        else:
            # new document: include the initial seedbank list
            save_dict['seedbanks'] = self.seedbanks
        return save_dict
    def delete(self):
        # Remove this torrent from the tracker db, keyed by info hash.
        self._validate_info_hash(self.info_hash)
        torrent_db.delete(self.info_hash)
    def __str__(self):
        return '%s: Peers: %s, Seeds: %s, Completed: %s, Updated: %s' % (self.info_hash, self.peers, self.seeds, self.completed, self.updated)
    def add_seedbank(self, seedbank, override_update=False):
        """
        Add seedbank to torrent and save changes.  Raises TorrentException
        if the torrent already has the given seedbank.
        """
        if not seedbank:
            raise TorrentException('Cannot add null seedbank to torrent')
        if self._has_seedbank(seedbank):
            raise TorrentException('Torrent already has seedbank (' + str(seedbank) + ')')
        if self.seedbanks is None:
            self.seedbanks = []
        if not seedbank.id in self.seedbanks:
            self.seedbanks.append(seedbank.id)
        # note: the partial save runs even when no id was appended
        self._save_seedbanks(override_update)
    def remove_seedbank(self, seedbank, override_updated=False):
        """
        Remove seedbank from torrent and save changes. Does not raise an exception
        if the torrent doesn't have the given seedbank.
        """
        if not seedbank:
            raise TorrentException('Cannot remove null seedbank from torrent')
        if seedbank.id in self.seedbanks:
            self.seedbanks.remove(seedbank.id)
        self._save_seedbanks(override_updated)
    def _has_seedbank(self, seedbank):
        # True when this torrent already references seedbank.id
        if self.seedbanks is None:
            return False
        if seedbank.id in self.seedbanks:
            return True
        else:
            return False
    def _save_seedbanks(self, override_updated=False):
        # Partial ($set) update of the seedbank list, refreshing 'updated'
        # unless override_updated is True and a value already exists.
        query = { "_id": self._id }
        data = { "$set": { 'seedbanks': self.seedbanks } }
        if not override_updated or self.updated is None:
            self.updated = self._get_now()
            data["$set"]['updated'] = self.updated
        torrent_db.update(query, data)
class TorrentManager(object):
    """Static helpers for registering/unregistering torrents with the tracker."""

    @staticmethod
    def add(info_hash):
        """Create and persist a new Torrent; fail if the hash is already tracked."""
        existing = Torrent.find(info_hash=info_hash)
        if existing is not None:
            raise TorrentException('Torrent already exists in tracker (' + info_hash + ')')
        torrent = Torrent()
        torrent.info_hash = info_hash
        torrent.save()

    @staticmethod
    def remove(info_hash):
        """Delete the tracked Torrent for info_hash; fail if it is unknown."""
        torrent = Torrent.find(info_hash=info_hash)
        if torrent is None:
            raise TorrentException('Could not find torrent in tracker (' + info_hash + ')')
        torrent.delete()
|
|
# -*- coding: utf-8 -*-
"""
jinja.utils
~~~~~~~~~~~
Utility functions.
**license information**: some of the regular expressions and
the ``urlize`` function were taken from the django framework.
:copyright: 2007 by Armin Ronacher, Lawrence Journal-World.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import string
from types import MethodType, FunctionType
from jinja import nodes
from jinja.exceptions import SecurityException, TemplateNotFound
from jinja.datastructure import TemplateData
# the python2.4 version of deque is missing the remove method
# because a for loop with a lookup for the missing value written
# in python is slower we just use deque if we have python2.5 or higher
try:
    from collections import deque
    # probe for deque.remove (only added in python 2.5)
    deque.remove
except (ImportError, AttributeError):
    class deque(list):
        """
        Minimal subclass of list that provides the deque
        interface used by the native `BaseContext` and the
        `CacheDict`
        """
        def appendleft(self, item):
            # O(n) on a list, acceptable for the small queues used here
            list.insert(self, 0, item)
        def popleft(self):
            return list.pop(self, 0)
        def clear(self):
            del self[:]
# support for a working reversed() in 2.3
try:
    reversed = reversed
except NameError:
    def reversed(iterable):
        # prefer the object's own __reversed__ protocol when available
        if hasattr(iterable, '__reversed__'):
            return iterable.__reversed__()
        try:
            return iter(iterable[::-1])
        except TypeError:
            # not sliceable: materialize first, then reverse
            return iter(tuple(iterable)[::-1])
# set support for python 2.3
try:
    set = set
except NameError:
    from sets import Set as set
# sorted support (just a simplified version)
try:
    sorted = sorted
except NameError:
    _cmp = cmp
    def sorted(seq, cmp=None, key=None, reverse=False):
        # note: a caller-supplied `cmp` is overridden when `key` is given
        rv = list(seq)
        if key is not None:
            cmp = lambda a, b: _cmp(key(a), key(b))
        rv.sort(cmp)
        if reverse:
            rv.reverse()
        return rv
# group by support
try:
    from itertools import groupby
except ImportError:
    class groupby(object):
        # pure-python fallback mirroring itertools.groupby (python < 2.4);
        # xrange(0) is used as a unique initial sentinel value
        def __init__(self, iterable, key=lambda x: x):
            self.keyfunc = key
            self.it = iter(iterable)
            self.tgtkey = self.currkey = self.currvalue = xrange(0)
        def __iter__(self):
            return self
        def next(self):
            # skip remaining members of the previous group, if any
            while self.currkey == self.tgtkey:
                self.currvalue = self.it.next()
                self.currkey = self.keyfunc(self.currvalue)
            self.tgtkey = self.currkey
            return (self.currkey, self._grouper(self.tgtkey))
        def _grouper(self, tgtkey):
            while self.currkey == tgtkey:
                yield self.currvalue
                self.currvalue = self.it.next()
                self.currkey = self.keyfunc(self.currvalue)
#: function types
callable_types = (FunctionType, MethodType)
#: number of maximal range items
MAX_RANGE = 1000000
_word_split_re = re.compile(r'(\s+)')
_punctuation_re = re.compile(
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
'|'.join([re.escape(p) for p in ('(', '<', '<')]),
'|'.join([re.escape(p) for p in ('.', ',', ')', '>', '\n', '>')])
)
)
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
#: used by from_string as cache
_from_string_env = None
def escape(s, quote=None):
    """
    SGML/XML escape an unicode object.

    Replaces ``&``, ``<`` and ``>`` with their entity forms; when `quote`
    is true, double quotes are escaped as well.  The ampersand must be
    replaced first so already-produced entities are not double-escaped.
    (The previous code had been entity-garbled into no-op replacements
    and an unterminated string literal.)
    """
    s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    if not quote:
        return s
    return s.replace('"', "&quot;")
def urlize(text, trim_url_limit=None, nofollow=False):
    """
    Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing.
    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.
    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.
    """
    # shorten display text to `limit` chars, appending '...' (python 2
    # and/or idiom standing in for a conditional expression)
    trim_url = lambda x, limit=trim_url_limit: limit is not None \
                         and (x[:limit] + (len(x) >=limit and '...'
                         or '')) or x
    # split on whitespace, keeping the separators so join() restores them
    words = _word_split_re.split(text)
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        # strip lead/trail punctuation before testing the middle part
        match = _punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            # bare domains: www.* or letter-leading *.org/.net/.com
            # (string.letters is python 2 only)
            if middle.startswith('www.') or (
                '@' not in middle and
                not middle.startswith('http://') and
                len(middle) > 0 and
                middle[0] in string.letters + string.digits and (
                    middle.endswith('.org') or
                    middle.endswith('.net') or
                    middle.endswith('.com')
                )):
                middle = '<a href="http://%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            # explicit http(s) URLs
            if middle.startswith('http://') or \
               middle.startswith('https://'):
                middle = '<a href="%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            # plain email addresses become mailto links
            if '@' in middle and not middle.startswith('www.') and \
               not ':' in middle and _simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return u''.join(words)
def from_string(source):
    """
    Create a template from the template source.
    """
    global _from_string_env
    # lazily create one shared Environment and reuse it for all calls
    # (imported here to avoid a circular import at module load time)
    if _from_string_env is None:
        from jinja.environment import Environment
        _from_string_env = Environment()
    return _from_string_env.from_string(source)
#: minor speedup
_getattr = getattr
def get_attribute(obj, name):
    """
    Return the attribute from name. Raise either `AttributeError`
    or `SecurityException` if something goes wrong.
    """
    # only string attribute names are allowed (python 2 basestring)
    if not isinstance(name, basestring):
        raise AttributeError(name)
    # block dunder access such as __class__ / __subclasses__
    if name[:2] == name[-2:] == '__':
        raise SecurityException('not allowed to access internal attributes')
    # NOTE(review): `A and B or C` parses as `(A and B) or C`, so any
    # 'im_'-prefixed name is blocked on every object, not only on
    # functions/methods -- confirm whether that is intended.
    if getattr(obj, '__class__', None) in callable_types and \
       name.startswith('func_') or name.startswith('im_'):
        raise SecurityException('not allowed to access function attributes')
    # objects may whitelist attributes via jinja_allowed_attributes
    r = _getattr(obj, 'jinja_allowed_attributes', None)
    if r is not None and name not in r:
        raise SecurityException('disallowed attribute accessed')
    # attribute lookups convert unicode strings to ascii bytestrings.
    # this process could raise an UnicodeEncodeError.
    try:
        return _getattr(obj, name)
    except UnicodeError:
        raise AttributeError(name)
def safe_range(start, stop=None, step=None):
    """
    "Safe" form of range that does not generate too large lists.
    """
    # mirror range()'s one/two/three-argument forms (python 2 xrange)
    if step is None:
        step = 1
    if stop is None:
        r = xrange(0, start, step)
    else:
        r = xrange(start, stop, step)
    if len(r) > MAX_RANGE:
        # lazily take only the first MAX_RANGE items
        def limit():
            i = 0
            for item in r:
                i += 1
                yield item
                if i >= MAX_RANGE:
                    break
        return list(limit())
    return list(r)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
    """
    Generate some lorem impsum for the template.

    Produces `n` paragraphs of `min`..`max` words each; when `html` is
    true each paragraph is escaped and wrapped in ``<p>`` tags.
    (`min`/`max` intentionally shadow the builtins to keep the template
    interface simple.)
    """
    from jinja.constants import LOREM_IPSUM_WORDS
    # NOTE(review): `random` appears unused here -- confirm before removing
    from random import choice, random, randrange
    words = LOREM_IPSUM_WORDS.split()
    result = []
    for _ in xrange(n):
        next_capitalized = True
        last_comma = last_fullstop = 0
        word = None
        last = None
        p = []
        # each paragraph contains out of 20 to 100 words.
        for idx, _ in enumerate(xrange(randrange(min, max))):
            # avoid repeating the same word twice in a row
            while True:
                word = choice(words)
                if word != last:
                    last = word
                    break
            if next_capitalized:
                word = word.capitalize()
                next_capitalized = False
            # add commas
            if idx - randrange(3, 8) > last_comma:
                last_comma = idx
                last_fullstop += 2
                word += ','
            # add end of sentences
            if idx - randrange(10, 20) > last_fullstop:
                last_comma = last_fullstop = idx
                word += '.'
                next_capitalized = True
            p.append(word)
        # ensure that the paragraph ends with a dot.
        p = u' '.join(p)
        if p.endswith(','):
            p = p[:-1] + '.'
        elif not p.endswith('.'):
            p += '.'
        result.append(p)
    if not html:
        return u'\n\n'.join(result)
    return u'\n'.join([u'<p>%s</p>' % escape(x) for x in result])
def watch_changes(env, context, iterable, *attributes):
    """
    Wise replacement for ``{% ifchanged %}``.

    Yields ``(changed, item)`` pairs for every item in `iterable`, where
    `changed` is True whenever the watched value differs from the one of
    the previous iteration.  Each entry in `attributes` is either a
    scalar attribute name or an iterable forming an attribute path; with
    no attributes the item itself is compared.
    """
    # find the attributes to watch
    if attributes:
        tests = []
        tmp = []
        for attribute in attributes:
            if isinstance(attribute, (str, unicode, int, long, bool)):
                tmp.append(attribute)
            else:
                tests.append(tuple(attribute))
        if tmp:
            # bugfix: append the accumulated scalar names; the previous
            # code appended tuple(attribute), i.e. the characters of the
            # loop's last attribute string
            tests.append(tuple(tmp))
        # unique sentinels guarantee the first item registers as changed
        last = tuple([object() for x in tests])
    # or no attributes if we watch the object itself
    else:
        tests = None
        last = object()
    # iterate trough it and keep check the attributes or values
    for item in iterable:
        if tests is None:
            cur = item
        else:
            cur = tuple([env.get_attributes(item, x) for x in tests])
        if cur != last:
            changed = True
            last = cur
        else:
            changed = False
        yield changed, item
watch_changes.jinja_context_callable = True
def render_included(env, context, template_name):
    """
    Works like djangos {% include %} tag. It doesn't include the
    template but load it independently and renders it to a string.
    """
    #XXX: ignores parent completely!
    tmpl = env.get_template(template_name)
    return tmpl.render(context.to_dict())
# marker so the environment passes (env, context) when calling this
render_included.jinja_context_callable = True
# python2.4 and lower has a bug regarding joining of broken generators.
# because of the runtime debugging system we have to keep track of the
# number of frames to skip. that's what RUNTIME_EXCEPTION_OFFSET is for.
try:
    # probe: join a generator that raises with a recognizable payload
    _test_singleton = object()
    def _test_gen_bug():
        raise TypeError(_test_singleton)
        yield None
    ''.join(_test_gen_bug())
except TypeError, e:
    if e.args and e.args[0] is _test_singleton:
        # exception propagated intact: joining generators is safe here
        capture_generator = u''.join
        RUNTIME_EXCEPTION_OFFSET = 1
    else:
        # buggy interpreter (<= 2.4): materialize before joining; the
        # extra frame shifts the traceback offset by one
        capture_generator = lambda gen: u''.join(tuple(gen))
        RUNTIME_EXCEPTION_OFFSET = 2
del _test_singleton, _test_gen_bug
def pformat(obj, verbose=False):
    """
    Prettyprint an object.  Either use the `pretty` library or the
    builtin `pprint`.
    """
    try:
        # prefer the optional third-party `pretty` library
        from pretty import pretty as pretty_print
        return pretty_print(obj, verbose=verbose)
    except ImportError:
        # stdlib fallback; `verbose` has no pprint equivalent
        from pprint import pformat as stdlib_pformat
        return stdlib_pformat(obj)
def buffereater(f, template_data=False):
    """
    Used by the python translator to capture output of substreams.
    (macros, filter sections etc)
    """
    def wrapped(*a, **kw):
        # hide this frame from the runtime debugging system
        __traceback_hide__ = True
        captured = capture_generator(f(*a, **kw))
        return TemplateData(captured) if template_data else captured
    return wrapped
def collect_translations(ast):
    """
    Collect all translatable strings for the given ast. The
    return value is a list of tuples in the form ``(lineno, singular,
    plural)``. If a translation doesn't require a plural form the
    third item is `None`.
    """
    # iterative depth-first walk over the node tree
    todo = [ast]
    result = []
    while todo:
        node = todo.pop()
        if node.__class__ is nodes.Trans:
            # {% trans %} block
            result.append((node.lineno, node.singular, node.plural))
        elif node.__class__ is nodes.CallExpression and \
             node.node.__class__ is nodes.NameExpression and \
             node.node.name == '_':
            # gettext-style _('literal') call with exactly one constant arg
            if len(node.args) == 1 and not node.kwargs and not node.dyn_args \
               and not node.dyn_kwargs and \
               node.args[0].__class__ is nodes.ConstantExpression:
                result.append((node.lineno, node.args[0].value, None))
        todo.extend(node.get_child_nodes())
    # order by line number (python 2 cmp-style sort)
    result.sort(lambda a, b: cmp(a[0], b[0]))
    return result
class DebugHelper(object):
    """
    Debugging Helper. Available in the template as "debug".
    """
    jinja_context_callable = True
    jinja_allowed_attributes = ['filters']

    def __init__(self):
        # the module-level singleton is created via object.__new__,
        # bypassing this constructor; direct instantiation is forbidden
        raise TypeError('cannot create %r instances' %
                        self.__class__.__name__)

    def __call__(self, env, context):
        """Print a nice representation of the context."""
        return pformat(context.to_dict(), verbose=True)

    def filters(self, env, context, builtins=True):
        """List the filters."""
        from inspect import getdoc
        strip = set()
        if not builtins:
            from jinja.defaults import DEFAULT_FILTERS
            strip = set(DEFAULT_FILTERS.values())
        filters = env.filters.items()
        filters.sort(lambda a, b: cmp(a[0].lower(), b[0].lower()))
        result = []
        for name, f in filters:
            if f in strip:
                continue
            doc = '\n'.join(['    ' + x for x in (getdoc(f) or '').splitlines()])
            result.append('`%s`\n\n%s' % (name, doc))
        return '\n\n'.join(result)
    filters.jinja_context_callable = True

    def tests(self, env, context, builtins=True):
        """List the tests."""
        from inspect import getdoc
        strip = set()
        if not builtins:
            from jinja.defaults import DEFAULT_TESTS
            strip = set(DEFAULT_TESTS.values())
        tests = env.tests.items()
        tests.sort(lambda a, b: cmp(a[0].lower(), b[0].lower()))
        result = []
        for name, f in tests:
            if f in strip:
                continue
            doc = '\n'.join(['    ' + x for x in (getdoc(f) or '').splitlines()])
            result.append('`%s`\n\n%s' % (name, doc))
        return '\n\n'.join(result)
    tests.jinja_context_callable = True

    def __str__(self):
        # bugfix: the old implementation used a bare `print` statement and
        # implicitly returned None, which made str(debug_helper) raise a
        # TypeError.  __str__ must return the string instead.
        return 'use debug() for debugging the context'

#: the singleton instance of `DebugHelper`
debug_helper = object.__new__(DebugHelper)
class CacheDict(object):
    """
    A dict like object that stores a limited number of items and forgets
    about the least recently used items::
        >>> cache = CacheDict(3)
        >>> cache['A'] = 0
        >>> cache['B'] = 1
        >>> cache['C'] = 2
        >>> len(cache)
        3
    If we now access 'A' again it has a higher priority than B::
        >>> cache['A']
        0
    If we add a new item 'D' now 'B' will disappear::
        >>> cache['D'] = 3
        >>> len(cache)
        3
        >>> 'B' in cache
        False
    If you iterate over the object the most recently used item will be
    yielded First::
        >>> for item in cache:
        ...     print item
        D
        A
        C
    If you want to iterate the other way round use ``reverse(cache)``.
    Implementation note: This is not a nice way to solve that problem but
    for smaller capacities it's faster than a linked list.
    Perfect for template environments where you don't expect too many
    different keys.
    """
    def __init__(self, capacity):
        self.capacity = capacity
        self._mapping = {}
        # usage queue: oldest key at the left, most recent at the right
        self._queue = deque()
        # alias all queue methods for faster lookup
        self._popleft = self._queue.popleft
        self._pop = self._queue.pop
        self._remove = self._queue.remove
        self._append = self._queue.append
    def copy(self):
        """
        Return an shallow copy of the instance.
        """
        rv = CacheDict(self.capacity)
        rv._mapping.update(self._mapping)
        # bugfix: extend the new instance's own queue instead of
        # replacing it -- the method aliases created in __init__ are
        # bound to that queue (rebinding would orphan them), and
        # collections.deque does not support slicing anyway.
        rv._queue.extend(self._queue)
        return rv
    def get(self, key, default=None):
        """
        Return an item from the cache dict or `default`
        """
        if key in self:
            return self[key]
        return default
    def setdefault(self, key, default=None):
        """
        Set `default` if the key is not in the cache otherwise
        leave unchanged. Return the value of this key.
        """
        if key in self:
            return self[key]
        self[key] = default
        return default
    def clear(self):
        """
        Clear the cache dict.
        """
        self._mapping.clear()
        self._queue.clear()
    def __contains__(self, key):
        """
        Check if a key exists in this cache dict.
        """
        return key in self._mapping
    def __len__(self):
        """
        Return the current size of the cache dict.
        """
        return len(self._mapping)
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self._mapping
        )
    def __getitem__(self, key):
        """
        Get an item from the cache dict. Moves the item up so that
        it has the highest priority then.
        Raise an `KeyError` if it does not exist.
        """
        rv = self._mapping[key]
        # skip the queue shuffle when the key is already the freshest
        if self._queue[-1] != key:
            self._remove(key)
            self._append(key)
        return rv
    def __setitem__(self, key, value):
        """
        Sets the value for an item. Moves the item up so that it
        has the highest priority then.
        """
        if key in self._mapping:
            self._remove(key)
        elif len(self._mapping) == self.capacity:
            # cache full: evict the least recently used key
            del self._mapping[self._popleft()]
        self._append(key)
        self._mapping[key] = value
    def __delitem__(self, key):
        """
        Remove an item from the cache dict.
        Raise an `KeyError` if it does not exist.
        """
        del self._mapping[key]
        self._remove(key)
    def __iter__(self):
        """
        Iterate over all values in the cache dict, ordered by
        the most recent usage.
        """
        return reversed(self._queue)
    def __reversed__(self):
        """
        Iterate over the values in the cache dict, oldest items
        coming first.
        """
        return iter(self._queue)
    __copy__ = copy
    def __deepcopy__(self, memo=None):
        """
        Return a deep copy of the cache dict.

        The deepcopy protocol passes a memo dict; the old signature
        accepted none, so ``copy.deepcopy`` raised a TypeError.
        """
        from copy import deepcopy
        rv = CacheDict(self.capacity)
        rv._mapping.update(deepcopy(self._mapping, memo))
        # extend (not rebind) for the same reason as in copy()
        rv._queue.extend(deepcopy(list(self._queue), memo))
        return rv
#: default helpers exposed to every template namespace
NAMESPACE = {
    'range': safe_range,
    'debug': debug_helper,
    'lipsum': generate_lorem_ipsum,
    'watchchanges': watch_changes,
    'rendertemplate': render_included
}
|
|
# Copyright (c) 2014 Hoang Do, Phuc Vo, P. Michiardi, D. Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor
from sahara import context
from sahara.i18n import _
from sahara.plugins import exceptions as ex
from sahara.plugins import provisioning as p
from sahara.plugins import recommendations_utils as ru
from sahara.plugins.spark import config_helper as c_helper
from sahara.plugins.spark import edp_engine
from sahara.plugins.spark import run_scripts as run
from sahara.plugins.spark import scaling as sc
from sahara.plugins.spark import shell_engine
from sahara.plugins import utils
from sahara.swift import swift_helper
from sahara.topology import topology_helper as th
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import files as f
from sahara.utils import general as ug
from sahara.utils import remote
# module-level handles: conductor API, logger and global oslo config
conductor = conductor.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SparkProvider(p.ProvisioningPluginBase):
def __init__(self):
self.processes = {
"HDFS": ["namenode", "datanode"],
"Spark": ["master", "slave"]
}
    def get_title(self):
        """Return the human-readable plugin title."""
        return "Apache Spark"
    def get_description(self):
        """Return the translated one-line plugin description."""
        return _("This plugin provides an ability to launch Spark on Hadoop "
                 "CDH cluster without any management consoles.")
def get_labels(self):
default = {'enabled': {'status': True}, 'stable': {'status': True}}
deprecated = {'enabled': {'status': True},
'deprecated': {'status': True}}
result = {'plugin_labels': copy.deepcopy(default)}
stable_versions = ['2.1.0', '1.6.0']
result['version_labels'] = {
version: copy.deepcopy(
default if version in stable_versions else deprecated
) for version in self.get_versions()
}
return result
    def get_versions(self):
        """Return the supported Spark versions, newest first."""
        return ['2.1.0', '1.6.0', '1.3.1']
    def get_configs(self, hadoop_version):
        """Return the plugin's configurable options (version-independent here)."""
        return c_helper.get_plugin_configs()
    def get_node_processes(self, hadoop_version):
        """Return the service -> node-process mapping built in __init__."""
        return self.processes
def validate(self, cluster):
nn_count = sum([ng.count for ng
in utils.get_node_groups(cluster, "namenode")])
if nn_count != 1:
raise ex.InvalidComponentCountException("namenode", 1, nn_count)
dn_count = sum([ng.count for ng
in utils.get_node_groups(cluster, "datanode")])
if dn_count < 1:
raise ex.InvalidComponentCountException("datanode", _("1 or more"),
nn_count)
rep_factor = utils.get_config_value_or_default('HDFS',
"dfs.replication",
cluster)
if dn_count < rep_factor:
raise ex.InvalidComponentCountException(
'datanode', _('%s or more') % rep_factor, dn_count,
_('Number of %(dn)s instances should not be less '
'than %(replication)s')
% {'dn': 'datanode', 'replication': 'dfs.replication'})
# validate Spark Master Node and Spark Slaves
sm_count = sum([ng.count for ng
in utils.get_node_groups(cluster, "master")])
if sm_count < 1:
raise ex.RequiredServiceMissingException("Spark master")
if sm_count >= 2:
raise ex.InvalidComponentCountException("Spark master", "1",
sm_count)
sl_count = sum([ng.count for ng
in utils.get_node_groups(cluster, "slave")])
if sl_count < 1:
raise ex.InvalidComponentCountException("Spark slave",
_("1 or more"),
sl_count)
    def update_infra(self, cluster):
        # No infrastructure updates are needed for this plugin.
        pass
    def configure_cluster(self, cluster):
        # Configuration == generating and pushing configs to all instances.
        self._setup_instances(cluster)
    @cpo.event_wrapper(
        True, step=utils.start_process_event_message("NameNode"))
    def _start_namenode(self, nn_instance):
        # Format HDFS, then launch the namenode process on the instance.
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")
    def start_spark(self, cluster):
        # Start the Spark master if one is deployed in this cluster.
        sm_instance = utils.get_instance(cluster, "master")
        if sm_instance:
            self._start_spark(cluster, sm_instance)
    @cpo.event_wrapper(
        True, step=utils.start_process_event_message("SparkMasterNode"))
    def _start_spark(self, cluster, sm_instance):
        # Launch the Spark master on its instance and log completion.
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info("Spark service has been started")
    def start_cluster(self, cluster):
        """Bring up HDFS (namenode, then datanodes), create the user's
        HDFS home directory, start Spark and install swift SSL certs."""
        nn_instance = utils.get_instance(cluster, "namenode")
        dn_instances = utils.get_instances(cluster, "datanode")
        # Start the name node
        self._start_namenode(nn_instance)
        # start the data nodes
        self._start_datanode_processes(dn_instances)
        run.await_datanodes(cluster)
        LOG.info("Hadoop services have been started")
        # create and own the HDFS home directory for the current user
        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")
        # start spark nodes
        self.start_spark(cluster)
        swift_helper.install_ssl_certs(utils.get_instances(cluster))
        LOG.info('Cluster has been started successfully')
        self._set_cluster_info(cluster)
    def _spark_home(self, cluster):
        # Spark install directory taken from the "Spark home" config value.
        return utils.get_config_value_or_default("Spark",
                                                 "Spark home",
                                                 cluster)
    def _extract_configs_to_extra(self, cluster):
        """Build the cluster-wide config payload ('extra') shared by all
        nodes: spark-env, slaves file, spark-defaults, job-cleanup config
        and, when data locality is enabled, the topology map."""
        sp_master = utils.get_instance(cluster, "master")
        sp_slaves = utils.get_instances(cluster, "slave")
        extra = dict()
        config_master = config_slaves = ''
        if sp_master is not None:
            config_master = c_helper.generate_spark_env_configs(cluster)
        if sp_slaves is not None:
            slavenames = []
            for slave in sp_slaves:
                slavenames.append(slave.hostname())
            config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
        else:
            # NOTE(review): get_instances likely returns a list (never
            # None), so this branch may be unreachable -- confirm.
            config_slaves = "\n"
        # Any node that might be used to run spark-submit will need
        # these libs for swift integration
        config_defaults = c_helper.generate_spark_executor_classpath(cluster)
        extra['job_cleanup'] = c_helper.generate_job_cleanup_config(cluster)
        extra['sp_master'] = config_master
        extra['sp_slaves'] = config_slaves
        extra['sp_defaults'] = config_defaults
        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            # one "host rack" line per mapping entry
            extra['topology_data'] = "\n".join(
                [k + " " + v for k, v in topology_data.items()]) + "\n"
        return extra
    def _add_instance_ng_related_to_extra(self, cluster, instance, extra):
        """Return a copy of `extra` augmented with per-instance data:
        generated XML configs and the hadoop setup script."""
        extra = extra.copy()
        ng = instance.node_group
        nn = utils.get_instance(cluster, "namenode")
        extra['xml'] = c_helper.generate_xml_configs(
            ng.configuration(), instance.storage_paths(), nn.hostname(), None)
        extra['setup_script'] = c_helper.generate_hadoop_setup_script(
            instance.storage_paths(),
            c_helper.extract_hadoop_environment_confs(ng.configuration()))
        return extra
    def _start_datanode_processes(self, dn_instances):
        # Start all datanodes in parallel under one provisioning step.
        if len(dn_instances) == 0:
            return
        cpo.add_provisioning_step(
            dn_instances[0].cluster_id,
            utils.start_process_event_message("DataNodes"), len(dn_instances))
        with context.ThreadGroup() as tg:
            for i in dn_instances:
                tg.spawn('spark-start-dn-%s' % i.instance_name,
                         self._start_datanode, i)
    @cpo.event_wrapper(mark_successful_on_exit=True)
    def _start_datanode(self, instance):
        # Launch the datanode process on a single instance.
        with instance.remote() as r:
            run.start_processes(r, "datanode")
    def _setup_instances(self, cluster, instances=None):
        """Generate cluster configs and push them to `instances`
        (defaults to every instance in the cluster)."""
        extra = self._extract_configs_to_extra(cluster)
        if instances is None:
            instances = utils.get_instances(cluster)
        self._push_configs_to_nodes(cluster, extra, instances)
    def _push_configs_to_nodes(self, cluster, extra, new_instances):
        """Push configs to all instances in parallel: full setup for
        `new_instances`, reconfiguration for pre-existing ones."""
        all_instances = utils.get_instances(cluster)
        cpo.add_provisioning_step(
            cluster.id, _("Push configs to nodes"), len(all_instances))
        with context.ThreadGroup() as tg:
            for instance in all_instances:
                # NOTE(review): `extra` is rebound each iteration, so
                # later instances build on the previous copy; the
                # per-instance keys are overwritten each time -- confirm
                # this accumulation is intentional.
                extra = self._add_instance_ng_related_to_extra(
                    cluster, instance, extra)
                if instance in new_instances:
                    tg.spawn('spark-configure-%s' % instance.instance_name,
                             self._push_configs_to_new_node, cluster,
                             extra, instance)
                else:
                    tg.spawn('spark-reconfigure-%s' % instance.instance_name,
                             self._push_configs_to_existing_node, cluster,
                             extra, instance)
    @cpo.event_wrapper(mark_successful_on_exit=True)
    def _push_configs_to_new_node(self, cluster, extra, instance):
        """Fully provision a newly created instance.

        Writes the Hadoop/Spark config files and the init script, sets up
        the management ssh key, creates the HDFS data directories and runs
        the node setup script.  The order of the remote commands matters:
        ownership of the config dirs is changed before files are written,
        and the init script is executed before the HDFS dirs get their
        final hdfs:hadoop ownership.
        """
        files_hadoop = {
            os.path.join(c_helper.HADOOP_CONF_DIR,
                         "core-site.xml"): extra['xml']['core-site'],
            os.path.join(c_helper.HADOOP_CONF_DIR,
                         "hdfs-site.xml"): extra['xml']['hdfs-site'],
        }
        sp_home = self._spark_home(cluster)
        files_spark = {
            os.path.join(sp_home, 'conf/spark-env.sh'): extra['sp_master'],
            os.path.join(sp_home, 'conf/slaves'): extra['sp_slaves'],
            os.path.join(sp_home,
                         'conf/spark-defaults.conf'): extra['sp_defaults']
        }
        files_init = {
            '/tmp/sahara-hadoop-init.sh': extra['setup_script'],
            'id_rsa': cluster.management_private_key,
            'authorized_keys': cluster.management_public_key
        }
        # pietro: This is required because the (secret) key is not stored in
        # .ssh which hinders password-less ssh required by spark scripts
        key_cmd = ('sudo cp $HOME/id_rsa $HOME/.ssh/; '
                   'sudo chown $USER $HOME/.ssh/id_rsa; '
                   'sudo chmod 600 $HOME/.ssh/id_rsa')
        storage_paths = instance.storage_paths()
        # Space-separated directory lists across all attached storage paths.
        dn_path = ' '.join(c_helper.make_hadoop_path(storage_paths,
                                                     '/dfs/dn'))
        nn_path = ' '.join(c_helper.make_hadoop_path(storage_paths,
                                                     '/dfs/nn'))
        hdfs_dir_cmd = ('sudo mkdir -p %(nn_path)s %(dn_path)s &&'
                        'sudo chown -R hdfs:hadoop %(nn_path)s %(dn_path)s &&'
                        'sudo chmod 755 %(nn_path)s %(dn_path)s' %
                        {"nn_path": nn_path, "dn_path": dn_path})
        with remote.get_remote(instance) as r:
            # Let the current (provisioning) user write the config files.
            r.execute_command(
                'sudo chown -R $USER:$USER /etc/hadoop'
            )
            r.execute_command(
                'sudo chown -R $USER:$USER %s' % sp_home
            )
            r.write_files_to(files_hadoop)
            r.write_files_to(files_spark)
            r.write_files_to(files_init)
            r.execute_command(
                'sudo chmod 0500 /tmp/sahara-hadoop-init.sh'
            )
            # Run the node setup script, keeping a log for debugging.
            r.execute_command(
                'sudo /tmp/sahara-hadoop-init.sh '
                '>> /tmp/sahara-hadoop-init.log 2>&1')
            r.execute_command(hdfs_dir_cmd)
            r.execute_command(key_cmd)
            if c_helper.is_data_locality_enabled(cluster):
                r.write_file_to(
                    '/etc/hadoop/topology.sh',
                    f.get_file_text(
                        'plugins/spark/resources/topology.sh'))
                r.execute_command(
                    'sudo chmod +x /etc/hadoop/topology.sh'
                )
            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)
            self._push_cleanup_job(r, cluster, extra, instance)
    @cpo.event_wrapper(mark_successful_on_exit=True)
    def _push_configs_to_existing_node(self, cluster, extra, instance):
        """Refresh configuration on an instance that already existed.

        Only rewrites what is relevant to the processes hosted on the
        node: Spark files for master/slave nodes, topology data and
        namenode host lists for Hadoop-related nodes.
        """
        node_processes = instance.node_group.node_processes
        need_update_hadoop = (c_helper.is_data_locality_enabled(cluster) or
                              'namenode' in node_processes)
        need_update_spark = ('master' in node_processes or
                             'slave' in node_processes)
        if need_update_spark:
            sp_home = self._spark_home(cluster)
            files = {
                os.path.join(sp_home,
                             'conf/spark-env.sh'): extra['sp_master'],
                os.path.join(sp_home, 'conf/slaves'): extra['sp_slaves'],
                os.path.join(
                    sp_home,
                    'conf/spark-defaults.conf'): extra['sp_defaults']
            }
            r = remote.get_remote(instance)
            r.write_files_to(files)
            self._push_cleanup_job(r, cluster, extra, instance)
        if need_update_hadoop:
            with remote.get_remote(instance) as r:
                self._write_topology_data(r, cluster, extra)
                self._push_master_configs(r, cluster, extra, instance)
def _write_topology_data(self, r, cluster, extra):
if c_helper.is_data_locality_enabled(cluster):
topology_data = extra['topology_data']
r.write_file_to('/etc/hadoop/topology.data', topology_data)
def _push_master_configs(self, r, cluster, extra, instance):
node_processes = instance.node_group.node_processes
if 'namenode' in node_processes:
self._push_namenode_configs(cluster, r)
def _push_cleanup_job(self, r, cluster, extra, instance):
node_processes = instance.node_group.node_processes
if 'master' in node_processes:
if extra['job_cleanup']['valid']:
r.write_file_to('/etc/hadoop/tmp-cleanup.sh',
extra['job_cleanup']['script'])
r.execute_command("chmod 755 /etc/hadoop/tmp-cleanup.sh")
cmd = 'sudo sh -c \'echo "%s" > /etc/cron.d/spark-cleanup\''
r.execute_command(cmd % extra['job_cleanup']['cron'])
else:
r.execute_command("sudo rm -f /etc/hadoop/tmp-cleanup.sh")
r.execute_command("sudo rm -f /etc/crond.d/spark-cleanup")
def _push_namenode_configs(self, cluster, r):
r.write_file_to('/etc/hadoop/dn.incl',
utils.generate_fqdn_host_names(
utils.get_instances(cluster, "datanode")))
r.write_file_to('/etc/hadoop/dn.excl', '')
    def _set_cluster_info(self, cluster):
        """Store service endpoint info (HDFS/Spark URLs) on the cluster."""
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        info = {}
        if nn:
            # dfs.http.address is "host:port"; keep only the port part and
            # combine it with the namenode's reachable address.
            address = utils.get_config_value_or_default(
                'HDFS', 'dfs.http.address', cluster)
            port = address[address.rfind(':') + 1:]
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), port)
            }
            info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()
        if sp_master:
            port = utils.get_config_value_or_default(
                'Spark', 'Master webui port', cluster)
            if port is not None:
                info['Spark'] = {
                    'Web UI': 'http://%s:%s' % (
                        sp_master.get_ip_or_dns_name(), port)
                }
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
# Scaling
    def validate_scaling(self, cluster, existing, additional):
        # Validate resize of existing node groups and addition of new node
        # groups before any scaling operation proceeds; either check may
        # raise a scaling exception.
        self._validate_existing_ng_scaling(cluster, existing)
        self._validate_additional_ng_scaling(cluster, additional)
def decommission_nodes(self, cluster, instances):
sls = utils.get_instances(cluster, "slave")
dns = utils.get_instances(cluster, "datanode")
decommission_dns = False
decommission_sls = False
for i in instances:
if 'datanode' in i.node_group.node_processes:
dns.remove(i)
decommission_dns = True
if 'slave' in i.node_group.node_processes:
sls.remove(i)
decommission_sls = True
nn = utils.get_instance(cluster, "namenode")
spark_master = utils.get_instance(cluster, "master")
if decommission_sls:
sc.decommission_sl(spark_master, instances, sls)
if decommission_dns:
sc.decommission_dn(nn, instances, dns)
    def scale_cluster(self, cluster, instances):
        """Configure and start services on newly added instances.

        The Spark master is stopped while the new nodes are configured and
        restarted at the end so it picks up the updated slaves file.
        """
        master = utils.get_instance(cluster, "master")
        r_master = remote.get_remote(master)
        run.stop_spark(r_master, self._spark_home(cluster))
        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
        # Tell the namenode to re-read the include/exclude host lists.
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        dn_instances = [instance for instance in instances if
                        'datanode' in instance.node_group.node_processes]
        self._start_datanode_processes(dn_instances)
        swift_helper.install_ssl_certs(instances)
        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info("Spark master service has been restarted")
    def _get_scalable_processes(self):
        # Only these processes may be added or removed when scaling.
        return ["datanode", "slave"]
    def _validate_additional_ng_scaling(self, cluster, additional):
        """Reject new node groups that contain non-scalable processes."""
        scalable_processes = self._get_scalable_processes()
        for ng_id in additional:
            ng = ug.get_by_id(cluster.node_groups, ng_id)
            if not set(ng.node_processes).issubset(scalable_processes):
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, _("Spark plugin cannot scale nodegroup"
                               " with processes: %s") %
                    ' '.join(ng.node_processes))
    def _validate_existing_ng_scaling(self, cluster, existing):
        """Validate resize of existing node groups.

        Ensures only scalable processes are resized and that shrinking
        datanodes leaves at least ``dfs.replication`` datanodes alive.
        """
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                # A target count lower than the current one means this
                # node group is being shrunk.
                if ng.count > existing[ng.id] and ("datanode" in
                                                   ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name, _("Spark plugin cannot scale nodegroup"
                                   " with processes: %s") %
                        ' '.join(ng.node_processes))
        dn_amount = len(utils.get_instances(cluster, "datanode"))
        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)
        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name, _("Spark plugin cannot shrink cluster because "
                                "there would be not enough nodes for HDFS "
                                "replicas (replication factor is %s)") %
                rep_factor)
def get_edp_engine(self, cluster, job_type):
if edp_engine.EdpEngine.job_type_supported(job_type):
return edp_engine.EdpEngine(cluster)
if shell_engine.ShellEngine.job_type_supported(job_type):
return shell_engine.ShellEngine(cluster)
return None
def get_edp_job_types(self, versions=None):
res = {}
for vers in self.get_versions():
if not versions or vers in versions:
res[vers] = shell_engine.ShellEngine.get_supported_job_types()
if edp_engine.EdpEngine.edp_supported(vers):
res[vers].extend(
edp_engine.EdpEngine.get_supported_job_types())
return res
def get_edp_config_hints(self, job_type, version):
if (edp_engine.EdpEngine.edp_supported(version) and
edp_engine.EdpEngine.job_type_supported(job_type)):
return edp_engine.EdpEngine.get_possible_job_config(job_type)
if shell_engine.ShellEngine.job_type_supported(job_type):
return shell_engine.ShellEngine.get_possible_job_config(job_type)
return {}
def get_open_ports(self, node_group):
cluster = node_group.cluster
ports_map = {
'namenode': [8020, 50070, 50470],
'datanode': [50010, 1004, 50075, 1006, 50020],
'master': [
int(utils.get_config_value_or_default("Spark", "Master port",
cluster)),
int(utils.get_config_value_or_default("Spark",
"Master webui port",
cluster)),
],
'slave': [
int(utils.get_config_value_or_default("Spark",
"Worker webui port",
cluster))
]
}
ports = []
for process in node_group.node_processes:
if process in ports_map:
ports.extend(ports_map[process])
return ports
    def recommend_configs(self, cluster, scaling=False):
        # Let the Hadoop auto-configuration provider pick sensible values
        # for the selected cluster configs (currently only dfs.replication).
        want_to_configure = {
            'cluster_configs': {
                'dfs.replication': ('HDFS', 'dfs.replication')
            }
        }
        provider = ru.HadoopAutoConfigsProvider(
            want_to_configure, self.get_configs(
                cluster.hadoop_version), cluster, scaling)
        provider.apply_recommended_configs()
|
|
import argparse
import deep_architect.utils as ut
from deep_architect.contrib.misc.datasets.loaders import (load_cifar10,
load_mnist)
from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset
from deep_architect.searchers import common as se
from deep_architect.contrib.misc import gpu_utils
from deep_architect import search_logging as sl
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator
from deep_architect.contrib.communicators.communicator import get_communicator
def start_searcher(comm,
                   searcher,
                   resume_if_exists,
                   folderpath,
                   search_name,
                   searcher_load_path,
                   num_samples=-1,
                   num_epochs=-1,
                   save_every=1):
    """Master loop: sample architectures, dispatch to workers, log results.

    Runs until the sample/epoch budget is exhausted, every dispatched
    model has finished and every worker has been sent a kill signal.
    Searcher state is checkpointed every ``save_every`` finished models so
    a search can be resumed via ``resume_if_exists``.
    """
    assert num_samples != -1 or num_epochs != -1
    print('SEARCHER')
    sl.create_search_folderpath(folderpath, search_name)
    search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
    save_filepath = ut.join_paths((search_data_folder, searcher_load_path))
    models_sampled = 0
    epochs = 0
    finished = 0
    killed = 0
    best_accuracy = 0.
    # Load previous searcher
    if resume_if_exists:
        searcher.load(search_data_folder)
        state = ut.read_jsonfile(save_filepath)
        epochs = state['epochs']
        killed = state['killed']
        models_sampled = state['models_finished']
        finished = state['models_finished']
    while (finished < models_sampled or killed < comm.num_workers):
        # Search end conditions
        cont = num_samples == -1 or models_sampled < num_samples
        cont = cont and (num_epochs == -1 or epochs < num_epochs)
        if cont:
            # See whether workers are ready to consume architectures
            if comm.is_ready_to_publish_architecture():
                eval_logger = sl.EvaluationLogger(folderpath, search_name,
                                                  models_sampled)
                _, _, vs, searcher_eval_token = searcher.sample()
                eval_logger.log_config(vs, searcher_eval_token)
                comm.publish_architecture_to_worker(vs, models_sampled,
                                                    searcher_eval_token)
                models_sampled += 1
        else:
            # Budget exhausted: kill (at most) one idle worker per pass.
            if comm.is_ready_to_publish_architecture():
                comm.kill_worker()
                killed += 1
        # See which workers have finished evaluation
        for worker in range(comm.num_workers):
            msg = comm.receive_results_in_master(worker)
            if msg is not None:
                results, model_id, searcher_eval_token = msg
                eval_logger = sl.EvaluationLogger(folderpath, search_name,
                                                  model_id)
                eval_logger.log_results(results)
                if 'epoch' in results:
                    epochs = max(epochs, results['epoch'])
                searcher.update(results['validation_accuracy'],
                                searcher_eval_token)
                best_accuracy = max(best_accuracy,
                                    results['validation_accuracy'])
                finished += 1
                if finished % save_every == 0:
                    print('Models sampled: %d Best Accuracy: %f' %
                          (finished, best_accuracy))
                    # NOTE(review): best_accuracy is reset here, so the
                    # value printed is per save window rather than a
                    # global best -- confirm that this is intended.
                    best_accuracy = 0.
                    searcher.save_state(search_data_folder)
                    state = {
                        'models_finished': finished,
                        'epochs': epochs,
                        'killed': killed
                    }
                    ut.write_jsonfile(state, save_filepath)
def start_worker(comm,
                 evaluator,
                 search_space_factory,
                 folderpath,
                 search_name,
                 resume=True,
                 save_every=1):
    """Worker loop: evaluate architectures received from the master.

    Blocks on the communicator until an architecture arrives (a ``None``
    message is the kill signal).  Results are published back to the
    master and evaluator state is checkpointed every ``save_every`` steps.
    """
    # set the available gpu for process
    print('WORKER %d' % comm.get_rank())
    step = 0
    sl.create_search_folderpath(folderpath, search_name)
    search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
    save_filepath = ut.join_paths(
        (search_data_folder, 'worker' + str(comm.get_rank()) + '.json'))
    if resume:
        evaluator.load_state(search_data_folder)
        state = ut.read_jsonfile(save_filepath)
        step = state['step']
    while (True):
        arch = comm.receive_architecture_in_worker()
        # if a kill signal is received
        if arch is None:
            break
        vs, evaluation_id, searcher_eval_token = arch
        # Recreate the search space and pin it to the sampled values
        # before evaluating.
        inputs, outputs = search_space_factory.get_search_space()
        se.specify(outputs, vs)
        results = evaluator.eval(inputs, outputs)
        step += 1
        if step % save_every == 0:
            evaluator.save_state(search_data_folder)
            state = {'step': step}
            ut.write_jsonfile(state, save_filepath)
        comm.publish_results_to_master(results, evaluation_id,
                                       searcher_eval_token)
def main():
    """Entry point: load the experiment config and start master or worker.

    Rank 0 of the communicator becomes the searcher (master); all other
    ranks become evaluation workers.
    """
    configs = ut.read_jsonfile(
        "./examples/tensorflow/full_benchmarks/experiment_config.json")
    parser = argparse.ArgumentParser("MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='normal')
    # Other arguments
    parser.add_argument('--display-output',
                        '-o',
                        action='store_true',
                        dest='display_output',
                        default=False)
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)
    options = parser.parse_args()
    config = configs[options.config_name]
    num_procs = config['num_procs'] if 'num_procs' in config else 0
    comm = get_communicator(config['communicator'], num_procs)
    if len(gpu_utils.get_gpu_information()) != 0:
        #https://github.com/tensorflow/tensorflow/issues/1888
        gpu_utils.set_visible_gpus(
            [comm.get_rank() % gpu_utils.get_total_num_gpus()])
    if 'eager' in config and config['eager']:
        import tensorflow as tf
        tf.logging.set_verbosity(tf.logging.ERROR)
        tf.enable_eager_execution()
    datasets = {
        'cifar10': lambda: (load_cifar10('data/cifar10/', one_hot=False), 10),
        'mnist': lambda: (load_mnist('data/mnist/'), 10),
    }
    (Xtrain, ytrain, Xval, yval, Xtest,
     ytest), num_classes = datasets[config['dataset']]()
    search_space_factory = name_to_search_space_factory_fn[
        config['search_space']](num_classes)
    save_every = 1 if 'save_every' not in config else config['save_every']
    if comm.get_rank() == 0:
        searcher = name_to_searcher_fn[config['searcher']](
            search_space_factory.get_search_space)
        num_samples = -1 if 'samples' not in config else config['samples']
        num_epochs = -1 if 'epochs' not in config else config['epochs']
        start_searcher(comm,
                       searcher,
                       options.resume,
                       config['search_folder'],
                       config['search_name'],
                       config['searcher_file_name'],
                       num_samples=num_samples,
                       num_epochs=num_epochs,
                       save_every=save_every)
    else:
        # Bug fix: this variable was previously named "train_d_advataset",
        # which left ``train_dataset`` undefined and crashed the evaluator
        # lambda below with a NameError.
        train_dataset = InMemoryDataset(Xtrain, ytrain, True)
        val_dataset = InMemoryDataset(Xval, yval, False)
        test_dataset = InMemoryDataset(Xtest, ytest, False)
        search_path = sl.get_search_folderpath(config['search_folder'],
                                               config['search_name'])
        ut.create_folder(ut.join_paths([search_path, 'scratch_data']),
                         create_parent_folders=True)
        scratch_folder = ut.join_paths(
            [search_path, 'scratch_data', 'eval_' + str(comm.get_rank())])
        ut.create_folder(scratch_folder)
        evaluators = {
            'simple_classification':
            lambda: SimpleClassifierEvaluator(
                train_dataset,
                val_dataset,
                num_classes,
                './temp' + str(comm.get_rank()),
                max_num_training_epochs=config['eval_epochs'],
                log_output_to_terminal=options.display_output,
                test_dataset=test_dataset),
        }
        assert not config['evaluator'].startswith('enas') or hasattr(
            search_space_factory, 'weight_sharer')
        evaluator = evaluators[config['evaluator']]()
        start_worker(comm,
                     evaluator,
                     search_space_factory,
                     config['search_folder'],
                     config['search_name'],
                     resume=options.resume,
                     save_every=save_every)
# Script entry point: run the MPI-driven architecture search.
if __name__ == "__main__":
    main()
|
|
"""
Calculation thread for modeling
"""
import time
import numpy as np
import math
from sas.sascalc.data_util.calcthread import CalcThread
from sas.sascalc.fit.MultiplicationModel import MultiplicationModel
class Calc2D(CalcThread):
    """
    Compute 2D model
    This calculation assumes a 2-fold symmetry of the model
    where points are computed for one half of the detector
    and I(qx, qy) = I(-qx, -qy) is assumed.
    """
    def __init__(self, data, model, smearer, qmin, qmax, page_id,
                 state=None,
                 weight=None,
                 fid=None,
                 toggle_mode_on=False,
                 completefn=None,
                 updatefn=None,
                 update_chisqr=True,
                 source='model',
                 yieldtime=0.04,
                 worktime=0.04,
                 exception_handler=None,
                 ):
        CalcThread.__init__(self, completefn, updatefn, yieldtime, worktime,
                            exception_handler=exception_handler)
        self.qmin = qmin
        self.qmax = qmax
        self.weight = weight
        self.fid = fid
        #self.qstep = qstep
        self.toggle_mode_on = toggle_mode_on
        self.data = data
        self.page_id = page_id
        # NOTE(review): the ``state`` argument is discarded (state is
        # always reset to None here) -- confirm this is intentional.
        self.state = None
        # the model on to calculate
        self.model = model
        self.smearer = smearer
        self.starttime = 0
        self.update_chisqr = update_chisqr
        self.source = source

    def compute(self):
        """
        Compute the data given a model function
        """
        self.starttime = time.time()
        # Determine appropriate q range
        if self.qmin is None:
            self.qmin = 0
        if self.qmax is None:
            if self.data is not None:
                newx = math.pow(max(math.fabs(self.data.xmax),
                                    math.fabs(self.data.xmin)), 2)
                newy = math.pow(max(math.fabs(self.data.ymax),
                                    math.fabs(self.data.ymin)), 2)
                self.qmax = math.sqrt(newx + newy)
        if self.data is None:
            msg = "Compute Calc2D receive data = %s.\n" % str(self.data)
            # Fix: use call syntax for the raise so the module also parses
            # on Python 3 (the old "raise E, msg" form is Python 2 only).
            raise ValueError(msg)
        # Define matrix where data will be plotted
        radius = np.sqrt((self.data.qx_data * self.data.qx_data) + \
                    (self.data.qy_data * self.data.qy_data))
        # For theory, qmax is based on 1d qmax
        # so that must be mulitified by sqrt(2) to get actual max for 2d
        index_model = (self.qmin <= radius) & (radius <= self.qmax)
        index_model = index_model & self.data.mask
        index_model = index_model & np.isfinite(self.data.data)
        if self.smearer is not None:
            # Set smearer w/ data, model and index.
            fn = self.smearer
            fn.set_model(self.model)
            fn.set_index(index_model)
            # Calculate smeared Intensity
            #(by Gaussian averaging): DataLoader/smearing2d/Smearer2D()
            value = fn.get_value()
        else:
            # calculation w/o smearing
            value = self.model.evalDistribution([
                self.data.qx_data[index_model],
                self.data.qy_data[index_model]
            ])
        # Initialize output to NaN so masked elements do not get plotted.
        output = np.empty_like(self.data.qx_data)
        # output default is None
        # This method is to distinguish between masked
        #point(nan) and data point = 0.
        output[:] = np.NaN
        # set value for self.mask==True, else still None to Plottools
        output[index_model] = value
        elapsed = time.time() - self.starttime
        self.complete(image=output,
                      data=self.data,
                      page_id=self.page_id,
                      model=self.model,
                      state=self.state,
                      toggle_mode_on=self.toggle_mode_on,
                      elapsed=elapsed,
                      index=index_model,
                      fid=self.fid,
                      qmin=self.qmin,
                      qmax=self.qmax,
                      weight=self.weight,
                      #qstep=self.qstep,
                      update_chisqr=self.update_chisqr,
                      source=self.source)
class Calc1D(CalcThread):
    """
    Compute 1D data
    """
    def __init__(self, model,
                 page_id,
                 data,
                 fid=None,
                 qmin=None,
                 qmax=None,
                 weight=None,
                 smearer=None,
                 toggle_mode_on=False,
                 state=None,
                 completefn=None,
                 update_chisqr=True,
                 source='model',
                 updatefn=None,
                 yieldtime=0.01,
                 worktime=0.01,
                 exception_handler=None,
                 ):
        """
        Initialize the thread with the model, data and q range to compute.
        """
        CalcThread.__init__(self, completefn, updatefn, yieldtime, worktime,
                            exception_handler=exception_handler)
        self.fid = fid
        self.data = data
        self.qmin = qmin
        self.qmax = qmax
        self.model = model
        self.weight = weight
        self.toggle_mode_on = toggle_mode_on
        self.state = state
        self.page_id = page_id
        self.smearer = smearer
        self.starttime = 0
        self.update_chisqr = update_chisqr
        self.source = source
        self.out = None
        self.index = None

    def compute(self):
        """
        Compute model 1d value given qmin , qmax , x value
        """
        self.starttime = time.time()
        output = np.zeros((len(self.data.x)))
        index = (self.qmin <= self.data.x) & (self.data.x <= self.qmax)
        # If we use a smearer, also return the unsmeared model
        unsmeared_output = None
        unsmeared_data = None
        unsmeared_error = None
        ##smearer the ouput of the plot
        if self.smearer is not None:
            first_bin, last_bin = self.smearer.get_bin_range(self.qmin,
                                                             self.qmax)
            mask = self.data.x[first_bin:last_bin+1]
            unsmeared_output = np.zeros((len(self.data.x)))
            unsmeared_output[first_bin:last_bin+1] = self.model.evalDistribution(mask)
            self.smearer.model = self.model
            output = self.smearer(unsmeared_output, first_bin, last_bin)
            # Rescale data to unsmeared model
            # Check that the arrays are compatible. If we only have a model but no data,
            # the length of data.y will be zero.
            if isinstance(self.data.y, np.ndarray) and output.shape == self.data.y.shape:
                unsmeared_data = np.zeros((len(self.data.x)))
                unsmeared_error = np.zeros((len(self.data.x)))
                unsmeared_data[first_bin:last_bin+1] = self.data.y[first_bin:last_bin+1]\
                                                * unsmeared_output[first_bin:last_bin+1]\
                                                / output[first_bin:last_bin+1]
                unsmeared_error[first_bin:last_bin+1] = self.data.dy[first_bin:last_bin+1]\
                                                * unsmeared_output[first_bin:last_bin+1]\
                                                / output[first_bin:last_bin+1]
                unsmeared_output=unsmeared_output[index]
                unsmeared_data=unsmeared_data[index]
                # NOTE(review): unlike the two assignments above, this one
                # does not slice with ``index`` (it is a no-op); it looks
                # like it should be ``unsmeared_error[index]`` -- confirm
                # before changing.
                unsmeared_error=unsmeared_error
        else:
            output[index] = self.model.evalDistribution(self.data.x[index])
        x=self.data.x[index]
        y=output[index]
        sq_values = None
        pq_values = None
        # For product (P*Q) models, also report the individual S(q) and
        # P(q) contributions so they can be plotted separately.
        if isinstance(self.model, MultiplicationModel):
            s_model = self.model.s_model
            p_model = self.model.p_model
            sq_values = s_model.evalDistribution(x)
            pq_values = p_model.evalDistribution(x)
        elif hasattr(self.model, "calc_composition_models"):
            results = self.model.calc_composition_models(x)
            if results is not None:
                pq_values, sq_values = results
        elapsed = time.time() - self.starttime
        self.complete(x=x, y=y,
                      page_id=self.page_id,
                      state=self.state,
                      weight=self.weight,
                      fid=self.fid,
                      toggle_mode_on=self.toggle_mode_on,
                      elapsed=elapsed, index=index, model=self.model,
                      data=self.data,
                      update_chisqr=self.update_chisqr,
                      source=self.source,
                      unsmeared_model=unsmeared_output,
                      unsmeared_data=unsmeared_data,
                      unsmeared_error=unsmeared_error,
                      pq_model=pq_values,
                      sq_model=sq_values)

    def results(self):
        """
        Send results of the computation
        """
        return [self.out, self.index]
"""
Example: ::
class CalcCommandline:
def __init__(self, n=20000):
#print thread.get_ident()
from sas.models.CylinderModel import CylinderModel
model = CylinderModel()
print model.runXY([0.01, 0.02])
qmax = 0.01
qstep = 0.0001
self.done = False
x = numpy.arange(-qmax, qmax+qstep*0.01, qstep)
y = numpy.arange(-qmax, qmax+qstep*0.01, qstep)
calc_thread_2D = Calc2D(x, y, None, model.clone(),None,
-qmax, qmax,qstep,
completefn=self.complete,
updatefn=self.update ,
yieldtime=0.0)
calc_thread_2D.queue()
calc_thread_2D.ready(2.5)
while not self.done:
time.sleep(1)
def update(self,output):
print "update"
def complete(self, image, data, model, elapsed, qmin, qmax,index, qstep ):
print "complete"
self.done = True
if __name__ == "__main__":
CalcCommandline()
"""
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local RPC server; HTTP basic auth is only used when a
# password has been configured above.
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:5888")
else:
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:5888")
# The RPC method to run is the first command line argument.
cmd = sys.argv[1].lower()
# Dispatch: prompt interactively for any arguments the chosen RPC call
# needs, then invoke it.  For calls with optional arguments, the
# full-argument call is retried without arguments when it fails.
if cmd == "backupwallet":
	try:
		path = raw_input("Enter destination path/filename: ")
		print access.backupwallet(path)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccount":
	try:
		addr = raw_input("Enter a ThePandacoin address: ")
		print access.getaccount(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaccountaddress(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaddressesbyaccount(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getbalance":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getbalance(acct, mc)
		except:
			print access.getbalance()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
	try:
		height = raw_input("Height: ")
		print access.getblockbycount(height)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockcount":
	try:
		print access.getblockcount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
	try:
		print access.getblocknumber()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
	try:
		print access.getconnectioncount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
	try:
		print access.getdifficulty()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getgenerate":
	try:
		print access.getgenerate()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
	try:
		print access.gethashespersec()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getinfo":
	try:
		print access.getinfo()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
	try:
		acct = raw_input("Enter an account name: ")
		try:
			print access.getnewaddress(acct)
		except:
			print access.getnewaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaccount(acct, mc)
		except:
			print access.getreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
	try:
		addr = raw_input("Enter a Bitcoin address (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaddress(addr, mc)
		except:
			print access.getreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gettransaction":
	try:
		txid = raw_input("Enter a transaction ID: ")
		print access.gettransaction(txid)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Remaining commands: help, listing, sending and wallet management.
elif cmd == "help":
	try:
		cmd = raw_input("Command (optional): ")
		try:
			print access.help(cmd)
		except:
			print access.help()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listaccounts":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.listaccounts(mc)
		except:
			print access.listaccounts()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaccount(mc, incemp)
		except:
			print access.listreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaddress(mc, incemp)
		except:
			print access.listreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listtransactions":
	try:
		acct = raw_input("Account (optional): ")
		count = raw_input("Number of transactions (optional): ")
		frm = raw_input("Skip (optional):")
		try:
			print access.listtransactions(acct, count, frm)
		except:
			print access.listtransactions()
	except:
		print "\n---An error occurred---\n"
elif cmd == "move":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.move(frm, to, amt, mc, comment)
		except:
			print access.move(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendfrom":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendfrom(frm, to, amt, mc, comment, commentto)
		except:
			print access.sendfrom(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendmany":
	try:
		frm = raw_input("From: ")
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.sendmany(frm,to,mc,comment)
		except:
			print access.sendmany(frm,to)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
	try:
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		amt = raw_input("Amount:")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendtoaddress(to,amt,comment,commentto)
		except:
			print access.sendtoaddress(to,amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setaccount":
	try:
		addr = raw_input("Address: ")
		acct = raw_input("Account:")
		print access.setaccount(addr,acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setgenerate":
	try:
		gen= raw_input("Generate? (true/false): ")
		cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
		try:
			print access.setgenerate(gen, cpus)
		except:
			print access.setgenerate(gen)
	except:
		print "\n---An error occurred---\n"
elif cmd == "settxfee":
	try:
		amt = raw_input("Amount:")
		print access.settxfee(amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "stop":
	try:
		print access.stop()
	except:
		print "\n---An error occurred---\n"
elif cmd == "validateaddress":
	try:
		addr = raw_input("Address: ")
		print access.validateaddress(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
	try:
		pwd = raw_input("Enter wallet passphrase: ")
		# Unlock the wallet for 60 seconds.
		access.walletpassphrase(pwd, 60)
		print "\n---Wallet unlocked---\n"
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
	try:
		pwd = raw_input("Enter old wallet passphrase: ")
		pwd2 = raw_input("Enter new wallet passphrase: ")
		access.walletpassphrasechange(pwd, pwd2)
		print
		print "\n---Passphrase changed---\n"
	except:
		print
		print "\n---An error occurred---\n"
		print
else:
	print "Command not found or not supported"
|
|
"""Create bcbio_sample.yaml files from standard templates and lists of input files.
Provides an automated way to generate a full set of analysis files from an inpu
YAML template. Default templates are provided for common approaches which can be tweaked
as needed.
"""
import collections
import contextlib
import copy
import csv
import datetime
import glob
import itertools
import os
import shutil
import urllib2
import toolz as tz
import yaml
from bcbio import utils
from bcbio.bam import fastq, sample_name
from bcbio.distributed import objectstore
from bcbio.upload import s3
from bcbio.pipeline import run_info
from bcbio.workflow.xprize import HelpArgParser
def parse_args(inputs):
    """Parse command line arguments for template-based sample creation."""
    arg_parser = HelpArgParser(
        description="Create a bcbio_sample.yaml file from a standard template and inputs")
    return setup_args(arg_parser).parse_args(inputs)
def setup_args(parser):
    """Attach the template workflow's arguments to ``parser`` and return it."""
    for name, help_msg in [
            ("template", ("Template name or path to template YAML file. "
                          "Built in choices: freebayes-variant, gatk-variant, tumor-paired, "
                          "noalign-variant, illumina-rnaseq, illumina-chipseq")),
            ("metadata", "CSV file with project metadata. Name of file used as project name.")]:
        parser.add_argument(name, help=help_msg)
    parser.add_argument("input_files", nargs="*",
                        help="Input read files, in BAM or fastq format")
    parser.add_argument("--only-metadata",
                        help="Ignore samples not present in metadata CSV file",
                        action="store_true", default=False)
    return parser
# ## Prepare sequence data inputs
def _prep_bam_input(f, i, base):
    """Build one sample configuration item for a BAM/CRAM input file.

    :param f: path (local or remote) to the input file
    :param i: extension-group index from the caller's loop (unused here)
    :param base: template item dict copied as the starting configuration
    :returns: new item dict with ``files`` and ``description`` set
    :raises ValueError: if a local path does not exist
    """
    if not os.path.exists(f) and not objectstore.is_remote(f):
        raise ValueError("Could not find input file: %s" % f)
    cur = copy.deepcopy(base)
    if objectstore.is_remote(f):
        # Remote inputs keep their URL; name comes from the file base.
        cur["files"] = [f]
        cur["description"] = os.path.splitext(os.path.basename(f))[0]
    else:
        cur["files"] = [os.path.abspath(f)]
        # Prefer the sample name from BAM metadata; fall back to file name.
        cur["description"] = ((sample_name(f) if f.endswith(".bam") else None)
                              or os.path.splitext(os.path.basename(f))[0])
    return cur
def _prep_fastq_input(fs, base):
    """Build one sample configuration item for a set of fastq files.

    :param fs: list of fastq paths belonging to one sample (e.g. read pairs)
    :param base: template item dict copied as the starting configuration
    :returns: new item dict with ``files`` and ``description`` set
    :raises ValueError: if a local path does not exist
    """
    for f in fs:
        if not os.path.exists(f) and not objectstore.is_remote(f):
            raise ValueError("Could not find input file: %s" % f)
    cur = copy.deepcopy(base)
    cur["files"] = [os.path.abspath(f) if not objectstore.is_remote(f) else f for f in fs]
    # Name the sample from the shared filename prefix, stripped of
    # read-number/extension leftovers.
    d = os.path.commonprefix([utils.splitext_plus(os.path.basename(f))[0] for f in fs])
    cur["description"] = fastq.rstrip_extra(d)
    return cur
# Recognized input extensions mapped to their broad input type. Bare ".txt"
# and ".gz" files are assumed to be (compressed) fastq reads.
KNOWN_EXTS = {".bam": "bam", ".cram": "bam", ".fq": "fastq",
              ".fastq": "fastq", ".txt": "fastq",
              ".fastq.gz": "fastq", ".fq.gz": "fastq",
              ".txt.gz": "fastq", ".gz": "fastq"}
def _prep_items_from_base(base, in_files):
    """Prepare a set of configuration items for input files.

    Expands directories and glob wildcards, then groups inputs by file type
    (BAM/CRAM versus fastq) and builds one configuration item per sample.

    :param base: template item dict used as the starting point for each sample
    :param in_files: iterable of files, directories or glob patterns
    :returns: list of per-sample configuration item dicts
    """
    details = []
    in_files = _expand_dirs(in_files, KNOWN_EXTS)
    in_files = _expand_wildcards(in_files)
    # itertools.groupby only merges adjacent inputs with the same extension
    # class; interleaved types simply yield multiple (handled) groups.
    for i, (ext, files) in enumerate(itertools.groupby(
            in_files, lambda x: KNOWN_EXTS.get(utils.splitext_plus(x)[-1].lower()))):
        if ext == "bam":
            for f in files:
                details.append(_prep_bam_input(f, i, base))
        elif ext == "fastq":
            files = list(files)
            for fs in fastq.combine_pairs(files):
                details.append(_prep_fastq_input(fs, base))
        else:
            # Fixed typo in the user-facing message ("ynexpected" -> "unexpected").
            print("Ignoring unexpected input file types %s: %s" % (ext, list(files)))
    return details
def _expand_file(x):
return os.path.abspath(os.path.normpath(os.path.expanduser(os.path.expandvars(x))))
def _expand_dirs(in_files, known_exts):
    """Replace directory inputs with the matching sequence files inside them.

    Non-directory entries pass through unchanged; every directory contributes
    the files matching each known sequencing extension.

    :param in_files: iterable of file and directory paths
    :param known_exts: dict of recognized extensions (keys used for globbing)
    :returns: flat list of file paths
    """
    def _is_dir(in_file):
        return os.path.isdir(os.path.expanduser(in_file))
    files, dirs = utils.partition(_is_dir, in_files)
    for dir in dirs:
        for ext in known_exts.keys():
            wildcard = os.path.join(os.path.expanduser(dir), "*" + ext)
            files = itertools.chain(glob.glob(wildcard), files)
    return list(files)
def _expand_wildcards(in_files):
    """Expand glob patterns in the input list into concrete file paths.

    Entries without a '*' pass through unchanged; patterns are expanded
    after ~ expansion.

    :returns: flat list of file paths
    """
    def _has_wildcard(in_file):
        return "*" in in_file
    files, wildcards = utils.partition(_has_wildcard, in_files)
    for wc in wildcards:
        abs_path = os.path.expanduser(wc)
        files = itertools.chain(glob.glob(abs_path), files)
    return list(files)
# ## Read and write configuration files
def name_to_config(template):
    """Read template file into a dictionary to use as base for all samples.

    Handles well-known template names, pulled from GitHub repository and local
    files.

    :param template: remote URL, local path, or bare name of a standard template
    :returns: tuple of (parsed config dict, raw YAML text or None for remote)
    :raises ValueError: if a bare template name is not found on GitHub
    """
    # NOTE(review): yaml.load without SafeLoader can execute arbitrary tags
    # on untrusted input; consider yaml.safe_load here.
    if objectstore.is_remote(template):
        with objectstore.open(template) as in_handle:
            config = yaml.load(in_handle)
        txt_config = None
    elif os.path.isfile(template):
        with open(template) as in_handle:
            txt_config = in_handle.read()
        with open(template) as in_handle:
            config = yaml.load(in_handle)
    else:
        # Fall back to the standard templates shipped in the GitHub repository.
        base_url = "https://raw.github.com/chapmanb/bcbio-nextgen/master/config/templates/%s.yaml"
        try:
            with contextlib.closing(urllib2.urlopen(base_url % template)) as in_handle:
                txt_config = in_handle.read()
            with contextlib.closing(urllib2.urlopen(base_url % template)) as in_handle:
                config = yaml.load(in_handle)
        except (urllib2.HTTPError, urllib2.URLError):
            raise ValueError("Could not find template '%s' locally or in standard templates on GitHub"
                             % template)
    return config, txt_config
def _write_template_config(template_txt, project_name, out_dir):
    """Save the raw template YAML under <out_dir>/config for later editing.

    :returns: path to the written ``<project>-template.yaml`` file
    """
    config_dir = utils.safe_makedir(os.path.join(out_dir, "config"))
    out_config_file = os.path.join(config_dir, "%s-template.yaml" % project_name)
    with open(out_config_file, "w") as out_handle:
        out_handle.write(template_txt)
    return out_config_file
def _write_config_file(items, global_vars, template, project_name, out_dir,
                       remotes):
    """Write configuration file, adding required top level attributes.

    An existing output file is preserved with a timestamped ``.bak`` suffix
    before the new one is written.

    :returns: path to the written ``<project>.yaml`` configuration file
    """
    config_dir = utils.safe_makedir(os.path.join(out_dir, "config"))
    out_config_file = os.path.join(config_dir, "%s.yaml" % project_name)
    out = {"fc_date": datetime.datetime.now().strftime("%Y-%m-%d"),
           "fc_name": project_name,
           "upload": {"dir": "../final"},
           "details": items}
    if remotes.get("base"):
        # Mirror the remote layout so results upload next to the inputs.
        r_base = objectstore.parse_remote(remotes.get("base"))
        out["upload"]["method"] = r_base.store
        out["upload"]["bucket"] = r_base.bucket
        out["upload"]["folder"] = os.path.join(r_base.key, "final") if r_base.key else "final"
        if r_base.region:
            out["upload"]["region"] = r_base.region
    if global_vars:
        out["globals"] = global_vars
    # Carry over any extra top-level template keys (everything but details).
    for k, v in template.iteritems():
        if k not in ["details"]:
            out[k] = v
    if os.path.exists(out_config_file):
        shutil.move(out_config_file,
                    out_config_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
    with open(out_config_file, "w") as out_handle:
        yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out_config_file
def _safe_name(x):
for prob in [" ", "."]:
x = x.replace(prob, "_")
return x
def _set_global_vars(metadata):
    """Identify files used multiple times in metadata and replace with global variables

    File-valued metadata entries are normalized to absolute paths. The
    global-variable substitution itself is currently disabled (kept below
    for reference), so an empty globals dict is always returned.

    :returns: tuple of (updated metadata dict, global variables dict)
    """
    fnames = collections.defaultdict(list)
    for sample in metadata.keys():
        for k, v in metadata[sample].items():
            if isinstance(v, basestring) and os.path.isfile(v):
                v = _expand_file(v)
                metadata[sample][k] = v
                fnames[v].append(k)
    global_vars = {}
    # Skip global vars -- more confusing than useful
    # loc_counts = collections.defaultdict(int)
    # global_var_sub = {}
    # for fname, locs in fnames.items():
    #     if len(locs) > 1:
    #         loc_counts[locs[0]] += 1
    #         name = "%s%s" % (locs[0], loc_counts[locs[0]])
    #         global_var_sub[fname] = name
    #         global_vars[name] = fname
    # for sample in metadata.keys():
    #     for k, v in metadata[sample].items():
    #         if isinstance(v, basestring) and v in global_var_sub:
    #             metadata[sample][k] = global_var_sub[v]
    return metadata, global_vars
def _parse_metadata(in_handle):
    """Reads metadata from a simple CSV structured input file.

    samplename,batch,phenotype
    ERR256785,batch1,normal

    Leading '#' lines are skipped; the first non-comment row supplies the
    attribute names (its first column is the sample-name header). Comma
    separated cell values become lists via ``_strip_and_convert_lists``.

    :returns: tuple of (metadata dict keyed by sample, global vars dict)
    :raises ValueError: if a sample appears on more than one row
    """
    metadata = {}
    reader = csv.reader(in_handle)
    while 1:
        header = reader.next()
        if not header[0].startswith("#"):
            break
    keys = [x.strip() for x in header[1:]]
    for sinfo in (x for x in reader if not x[0].startswith("#")):
        sinfo = [_strip_and_convert_lists(x) for x in sinfo]
        sample = sinfo[0]
        # sanity check to avoid duplicate rows
        if sample in metadata:
            raise ValueError("Sample %s present multiple times in metadata file.\n"
                             "If you need to specify multiple attributes as a list "
                             "use a semi-colon to separate them on a single line.\n"
                             "https://bcbio-nextgen.readthedocs.org/en/latest/"
                             "contents/configuration.html#automated-sample-configuration\n"
                             "Duplicate line is %s" % (sample, sinfo))
        metadata[sample] = dict(zip(keys, sinfo[1:]))
    metadata, global_vars = _set_global_vars(metadata)
    return metadata, global_vars
def _strip_and_convert_lists(field):
field = field.strip()
if "," in field:
field = [x.strip() for x in field.split(",")]
return field
def _pname_and_metadata(in_file):
    """Retrieve metadata and project name from the input metadata CSV file.

    Uses the input file name for the project name and for back compatibility,
    accepts the project name as an input, providing no metadata.

    :returns: (safe project name, metadata dict, global vars dict,
        local metadata file path or None)
    :raises ValueError: if a ``.csv`` argument does not exist locally
    """
    if os.path.isfile(in_file):
        with open(in_file) as in_handle:
            md, global_vars = _parse_metadata(in_handle)
        base = os.path.splitext(os.path.basename(in_file))[0]
        md_file = in_file
    elif objectstore.is_remote(in_file):
        with objectstore.open(in_file) as in_handle:
            md, global_vars = _parse_metadata(in_handle)
        base = os.path.splitext(os.path.basename(in_file))[0]
        md_file = None
    else:
        # Not a file: treat the argument as a bare project name, unless it
        # looks like a missing CSV file, which is a user error.
        if in_file.endswith(".csv"):
            raise ValueError("Did not find input metadata file: %s" % in_file)
        base, md, global_vars = _safe_name(os.path.splitext(os.path.basename(in_file))[0]), {}, {}
        md_file = None
    return _safe_name(base), md, global_vars, md_file
def _handle_special_yaml_cases(v):
"""Handle values that pass integer, boolean or list values.
"""
if ";" in v:
v = v.split(";")
elif isinstance(v, list):
v = v
else:
try:
v = int(v)
except ValueError:
if v.lower() == "true":
v = True
elif v.lower() == "false":
v = False
return v
def _add_ped_metadata(name, metadata):
"""Add standard PED file attributes into metadata if not present.
http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
"""
def _ped_mapping(x, valmap):
try:
x = int(x)
except ValueError:
x = -1
for k, v in valmap.items():
if k == x:
return v
return None
def _ped_to_gender(x):
return _ped_mapping(x, {1: "male", 2: "female"})
def _ped_to_phenotype(x):
return _ped_mapping(x, {1: "unaffected", 2: "affected"})
with open(metadata["ped"]) as in_handle:
for line in in_handle:
parts = line.split("\t")[:6]
if parts[1] == str(name):
for index, key, convert_fn in [(4, "sex", _ped_to_gender), (0, "batch", lambda x: x),
(5, "phenotype", _ped_to_phenotype)]:
val = convert_fn(parts[index])
if val is not None and key not in metadata:
metadata[key] = val
break
return metadata
def _add_metadata(item, metadata, remotes, only_metadata=False):
    """Add metadata information from CSV file to current item.

    Retrieves metadata based on 'description' parsed from input CSV file.
    Adds to object and handles special keys:
    - `description`: A new description for the item. Used to relabel items
      based on the pre-determined description from fastq name or BAM read groups.
    - Keys matching supported names in the algorithm section map
      to key/value pairs there instead of metadata.

    :returns: the updated item, or None when only_metadata drops the sample
    """
    # Metadata rows may be keyed by description, full file name or file base.
    item_md = metadata.get(item["description"],
                           metadata.get(os.path.basename(item["files"][0]),
                                        metadata.get(os.path.splitext(os.path.basename(item["files"][0]))[0], {})))
    if remotes.get("region"):
        item["algorithm"]["variant_regions"] = remotes["region"]
    TOP_LEVEL = set(["description", "genome_build", "lane", "vrn_files", "files", "analysis"])
    keep_sample = True
    if len(item_md) > 0:
        if "metadata" not in item:
            item["metadata"] = {}
        for k, v in item_md.iteritems():
            if v:
                if k in TOP_LEVEL:
                    item[k] = v
                elif k in run_info.ALGORITHM_KEYS:
                    v = _handle_special_yaml_cases(v)
                    item["algorithm"][k] = v
                else:
                    v = _handle_special_yaml_cases(v)
                    item["metadata"][k] = v
    elif len(metadata) > 0:
        # Metadata file exists but has no row for this sample.
        warn = "Dropped sample" if only_metadata else "Added minimal sample information"
        print "WARNING: %s: metadata not found for %s, %s" % (warn, item["description"],
                                                              os.path.basename(item["files"][0]))
        keep_sample = not only_metadata
    if tz.get_in(["metadata", "ped"], item):
        item["metadata"] = _add_ped_metadata(item["description"], item["metadata"])
    return item if keep_sample else None
def _retrieve_remote(fnames):
    """Retrieve remote inputs found in the same bucket as the template or metadata files.

    Only the first remote filename in *fnames* is examined. Returns a dict
    with the remote base directory, discovered sequence inputs and a single
    BED region file (when exactly one is present), or an empty dict when no
    input is remote.
    """
    for fname in fnames:
        if objectstore.is_remote(fname):
            inputs = []
            regions = []
            remote_base = os.path.dirname(fname)
            for rfname in objectstore.list(remote_base):
                if rfname.endswith(tuple(KNOWN_EXTS.keys())):
                    inputs.append(rfname)
                elif rfname.endswith((".bed", ".bed.gz")):
                    regions.append(rfname)
            return {"base": remote_base,
                    "inputs": inputs,
                    "region": regions[0] if len(regions) == 1 else None}
    return {}
def _convert_to_relpaths(data, work_dir):
"""Convert absolute paths in the input data to relative paths to the work directory.
"""
work_dir = os.path.abspath(work_dir)
data["files"] = [os.path.relpath(f, work_dir) for f in data["files"]]
for topk in ["metadata", "algorithm"]:
for k, v in data[topk].items():
if isinstance(v, basestring) and os.path.isfile(v) and os.path.isabs(v):
data[topk][k] = os.path.relpath(v, work_dir)
return data
def setup(args):
    """Build project configuration and directory structure from parsed args.

    Creates <cwd>/<project>/{config,work}, writes the template YAML and,
    when input files are present, the full sample configuration; prints
    follow-up instructions and optionally uploads the config back to S3.
    """
    template, template_txt = name_to_config(args.template)
    base_item = template["details"][0]
    project_name, metadata, global_vars, md_file = _pname_and_metadata(args.metadata)
    remotes = _retrieve_remote([args.metadata, args.template])
    inputs = args.input_files + remotes.get("inputs", [])
    raw_items = [_add_metadata(item, metadata, remotes, args.only_metadata)
                 for item in _prep_items_from_base(base_item, inputs)]
    # _add_metadata returns None for samples dropped by --only-metadata.
    items = [x for x in raw_items if x]
    out_dir = os.path.join(os.getcwd(), project_name)
    work_dir = utils.safe_makedir(os.path.join(out_dir, "work"))
    if hasattr(args, "relpaths") and args.relpaths:
        items = [_convert_to_relpaths(x, work_dir) for x in items]
    out_config_file = _write_template_config(template_txt, project_name, out_dir)
    if md_file:
        shutil.copyfile(md_file, os.path.join(out_dir, "config", os.path.basename(md_file)))
    if len(items) == 0:
        # No usable inputs yet: only the editable template is produced.
        print
        print "Template configuration file created at: %s" % out_config_file
        print "Edit to finalize custom options, then prepare full sample config with:"
        print " bcbio_nextgen.py -w template %s %s sample1.bam sample2.fq" % \
            (out_config_file, project_name)
    else:
        out_config_file = _write_config_file(items, global_vars, template, project_name, out_dir,
                                             remotes)
        print
        print "Configuration file created at: %s" % out_config_file
        print "Edit to finalize and run with:"
        print " cd %s" % work_dir
        print " bcbio_nextgen.py ../config/%s" % os.path.basename(out_config_file)
        if remotes.get("base"):
            remote_path = os.path.join(remotes["base"], os.path.basename(out_config_file))
            s3.upload_file_boto(out_config_file, remote_path)
            print "Also uploaded to AWS S3 in %s" % remotes["base"]
            print "Run directly with bcbio_vm.py run %s" % remote_path
|
|
import ConfigParser
import copy
import datetime
import getpass
import logging
import os
import re
import sys
from StringIO import StringIO
import traceback
import types
import textwrap
from optparse import OptionParser, OptionGroup
import basedefs
import validators
from . import utils
import processors
import output_messages
from .exceptions import FlagValidationError, ParamValidationError
from packstack.modules.ospluginutils import gethostlist
from setup_controller import Controller
# Singleton controller holding parameter groups and collected configuration.
controller = Controller()
# Values supplied via command line flags, keyed by parameter CONF_NAME.
commandLineValues = {}
# List to hold all values to be masked in logging (i.e. passwords and sensitive data)
#TODO: read default values from conf_param?
masked_value_set = set()
def initLogging (debug):
    """Initialize the root logger writing to a fresh, owner-only log file.

    :param debug: when true, log at DEBUG level instead of INFO
    :raises Exception: wrapped error if the logger cannot be set up
    """
    global logFile
    try:
        logFilename = "openstack-setup.log"
        logFile = os.path.join(basedefs.DIR_LOG, logFilename)
        # Create the log file with specific permissions, puppet has a habit of putting
        # passwords in logs
        os.close(os.open(logFile, os.O_CREAT | os.O_EXCL, 0600))
        hdlr = logging.FileHandler (filename=logFile, mode='w')
        if (debug):
            level = logging.DEBUG
        else:
            level = logging.INFO
        fmts='%(asctime)s::%(levelname)s::%(module)s::%(lineno)d::%(name)s:: %(message)s'
        dfmt='%Y-%m-%d %H:%M:%S'
        fmt = logging.Formatter(fmts, dfmt)
        hdlr.setFormatter(fmt)
        # Replace any previously-installed handlers with our file handler.
        logging.root.handlers = []
        logging.root.addHandler(hdlr)
        logging.root.setLevel(level)
    except:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_FAILED_INIT_LOGGER)
def _getInputFromUser(param):
    """
    this private func reads the data from the user
    for the given param

    Resolution order: USE_DEFAULT -> command line value -> interactive
    prompt. Loops until the value passes validation (or the user accepts a
    loosely-validated value), then stores it in controller.CONF.
    """
    loop = True
    userInput = None
    try:
        if param.USE_DEFAULT:
            logging.debug("setting default value (%s) for key (%s)" % (mask(param.DEFAULT_VALUE), param.CONF_NAME))
            controller.CONF[param.CONF_NAME] = param.DEFAULT_VALUE
        else:
            while loop:
                # If the value was not supplied by the command line flags
                if not commandLineValues.has_key(param.CONF_NAME):
                    message = StringIO()
                    message.write(param.PROMPT)
                    val_list = param.VALIDATORS or []
                    # Show the option list in the prompt unless free-form
                    # regexp validation is in effect.
                    if validators.validate_regexp not in val_list \
                       and param.OPTION_LIST:
                        message.write(" [%s]" % "|".join(param.OPTION_LIST))
                    if param.DEFAULT_VALUE:
                        message.write(" [%s] " % (str(param.DEFAULT_VALUE)))
                    message.write(": ")
                    message.seek(0)
                    #mask password or hidden fields
                    if (param.MASK_INPUT):
                        userInput = getpass.getpass("%s :" % (param.PROMPT))
                    else:
                        userInput = raw_input(message.read())
                else:
                    userInput = commandLineValues[param.CONF_NAME]
                # If DEFAULT_VALUE is set and user did not input anything
                if userInput == "" and len(str(param.DEFAULT_VALUE)) > 0:
                    userInput = param.DEFAULT_VALUE
                # Param processing
                userInput = process_param_value(param, userInput)
                # If param requires validation
                try:
                    validate_param_value(param, userInput)
                    controller.CONF[param.CONF_NAME] = userInput
                    loop = False
                except ParamValidationError:
                    if param.LOOSE_VALIDATION:
                        # If validation failed but LOOSE_VALIDATION is true, ask user
                        answer = _askYesNo("User input failed validation, "
                                           "do you still wish to use it")
                        loop = not answer
                        if answer:
                            controller.CONF[param.CONF_NAME] = userInput
                            continue
                        else:
                            if commandLineValues.has_key(param.CONF_NAME):
                                del commandLineValues[param.CONF_NAME]
                    else:
                        # Delete value from commandLineValues so that we will prompt the user for input
                        if commandLineValues.has_key(param.CONF_NAME):
                            del commandLineValues[param.CONF_NAME]
                        loop = True
    except KeyboardInterrupt:
        print "" # add the new line so messages wont be displayed in the same line as the question
        raise
    except:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_READ_INPUT_PARAM % (param.CONF_NAME))
def input_param(param):
    """
    this func will read input from user
    and ask confirmation if needed

    Password-style params with NEED_CONFIRM are asked twice (unless already
    supplied on the command line) and must match before being accepted.
    """
    # We need to check if a param needs confirmation, (i.e. ask user twice)
    # Do not validate if it was given from the command line
    if (param.NEED_CONFIRM and not commandLineValues.has_key(param.CONF_NAME)):
        #create a copy of the param so we can call it twice
        confirmedParam = copy.deepcopy(param)
        confirmedParamName = param.CONF_NAME + "_CONFIRMED"
        confirmedParam.CONF_NAME = confirmedParamName
        confirmedParam.PROMPT = output_messages.INFO_CONF_PARAMS_PASSWD_CONFIRM_PROMPT
        confirmedParam.VALIDATORS = [validators.validate_not_empty]
        # Now get both values from user (with existing validations
        while True:
            _getInputFromUser(param)
            _getInputFromUser(confirmedParam)
            if controller.CONF[param.CONF_NAME] == controller.CONF[confirmedParamName]:
                logging.debug("Param confirmation passed, value for both questions is identical")
                break
            else:
                print output_messages.INFO_VAL_PASSWORD_DONT_MATCH
    else:
        _getInputFromUser(param)
    return param
def _askYesNo(question=None):
    """Prompt until the user answers yes or no; return True for yes.

    Only the first character of the answer is inspected; anything other
    than 'y' or 'n' re-prompts.
    """
    message = StringIO()
    while True:
        askString = "\r%s? (yes|no): "%(question)
        logging.debug("asking user: %s"%askString)
        message.write(askString)
        message.seek(0)
        raw = raw_input(message.read())
        if not len(raw):
            continue
        answer = raw[0].lower()
        logging.debug("user answered read: %s"%(answer))
        if answer not in 'yn':
            continue
        return answer == 'y'
def _addDefaultsToMaskedValueSet():
    """
    For every param in conf_params
    that has MASK_INPUT enabled keep the default value
    in the 'masked_value_set'

    Runs before any user input so defaults are never logged in clear text.
    """
    global masked_value_set
    for group in controller.getAllGroups():
        for param in group.parameters.itervalues():
            # Keep default password values masked, but ignore default empty values
            if ((param.MASK_INPUT == True) and param.DEFAULT_VALUE != ""):
                masked_value_set.add(param.DEFAULT_VALUE)
def _updateMaskedValueSet():
    """
    For every param in conf
    has MASK_INPUT enabled keep the user input
    in the 'masked_value_set'

    Called after input collection so user-supplied secrets are masked too.
    """
    global masked_value_set
    for confName in controller.CONF:
        # Add all needed values to masked_value_set
        if (controller.getParamKeyValue(confName, "MASK_INPUT") == True):
            masked_value_set.add(controller.CONF[confName])
def mask(input):
    """
    Gets a dict/list/str and searches for masked values in them.
    The list of masked values is masked_value_set and is updated
    via the user input.
    If it finds any, it replaces them with '********' and returns a
    masked deep copy; the original object is not modified.
    """
    output = copy.deepcopy(input)
    if type(input) == types.DictType:
        for key in input:
            if type(input[key]) == types.StringType:
                output[key] = utils.mask_string(input[key],
                                                masked_value_set)
    if type(input) == types.ListType:
        for item in input:
            org = item
            # NOTE(review): list.index returns the first occurrence, so
            # duplicate list items may be replaced at the wrong position --
            # confirm callers never pass lists with duplicates.
            orgIndex = input.index(org)
            if type(item) == types.StringType:
                item = utils.mask_string(item, masked_value_set)
            if item != org:
                output.remove(org)
                output.insert(orgIndex, item)
    if type(input) == types.StringType:
        output = utils.mask_string(input, masked_value_set)
    return output
def removeMaskString(maskedString):
    """
    remove an element from masked_value_set

    set.discard() removes the value when present and is a no-op otherwise,
    replacing the previous scan-then-remove loop (set.remove() would raise
    KeyError for a missing value, which that loop was working around).
    """
    global masked_value_set
    masked_value_set.discard(maskedString)
def validate_param_value(param, value):
    """Run each of *param*'s validators against *value*.

    Prints a user-facing message and re-raises on the first
    ParamValidationError.
    """
    cname = param.CONF_NAME
    logging.debug("Validating parameter %s." % cname)
    val_list = param.VALIDATORS or []
    opt_list = param.OPTION_LIST
    for val_func in val_list:
        try:
            val_func(value, opt_list)
        except ParamValidationError as ex:
            print 'Parameter %s failed validation: %s' % (cname, ex)
            raise
def process_param_value(param, value):
    """Pipe *value* through the param's processor functions, in order.

    Each processor may transform the value; the user is informed whenever a
    processor changes it.

    :returns: the final processed value
    :raises processors.ParamProcessingError: propagated from a processor
    """
    _value = value
    proclist = param.PROCESSORS or []
    for proc_func in proclist:
        logging.debug("Processing value of parameter "
                      "%s." % param.CONF_NAME)
        try:
            new_value = proc_func(_value, controller.CONF)
            if new_value != _value:
                msg = output_messages.INFO_CHANGED_VALUE
                print msg % (_value, new_value)
                _value = new_value
            else:
                logging.debug("Processor returned the original "
                              "value: %s" % _value)
        except processors.ParamProcessingError, ex:
            print ("Value processing of parameter %s "
                   "failed.\n%s" % (param.CONF_NAME, ex))
            raise
    return _value
def _handleGroupCondition(config, conditionName, conditionValue):
    """
    handle params group pre/post condition
    checks if a group has a pre/post condition
    and validates the params related to the group

    :param config: parsed answer-file ConfigParser instance
    :param conditionName: callable or the name of a conf parameter
    :param conditionValue: incoming value (overwritten by the evaluation)
    :raises TypeError: for unsupported condition types
    """
    # If the post condition is a function
    if callable(conditionName):
        # Call the function conditionName with conf as the arg
        conditionValue = conditionName(controller.CONF)
    # If the condition is a string - just read it to global conf
    # We assume that if we get a string as a member it is the name of a member of conf_params
    elif type(conditionName) == types.StringType:
        conditionValue = _loadParamFromFile(config, "general", conditionName)
    else:
        # Any other type is invalid
        raise TypeError("%s type (%s) is not supported" % (conditionName, type(conditionName)))
    return conditionValue
def _loadParamFromFile(config, section, paramName):
    """
    read param from file
    validate it
    and load to to global conf dict

    :returns: the processed, validated value (also stored in controller.CONF)
    :raises KeyError: when the option is missing from the answer file
    """
    # Get paramName from answer file
    try:
        value = config.get(section, paramName)
    except ConfigParser.NoOptionError:
        raise KeyError('Parser cannot find option %s in '
                       'answer file.' % paramName)
    # Validate param value using its validation func
    param = controller.getParamByName(paramName)
    value = process_param_value(param, value)
    validate_param_value(param, value)
    # Keep param value in our never ending global conf
    controller.CONF[param.CONF_NAME] = value
    return value
def _handleAnswerFileParams(answerFile):
    """
    handle loading and validating
    params from answer file
    supports reading single or group params

    Groups whose pre-condition does not match are skipped entirely; a
    failed post-condition aborts with a wrapped validation error.
    """
    try:
        logging.debug("Starting to handle config file")
        # Read answer file
        fconf = ConfigParser.ConfigParser()
        fconf.read(answerFile)
        # Iterate all the groups and check the pre/post conditions
        for group in controller.getAllGroups():
            # Get all params per group
            # Handle pre conditions for group
            preConditionValue = True
            if group.PRE_CONDITION:
                preConditionValue = _handleGroupCondition(fconf, group.PRE_CONDITION, preConditionValue)
            # Handle pre condition match with case insensitive values
            logging.info("Comparing pre- conditions, value: '%s', and match: '%s'" % (preConditionValue, group.PRE_CONDITION_MATCH))
            if preConditionValue == group.PRE_CONDITION_MATCH:
                for param in group.parameters.itervalues():
                    _loadParamFromFile(fconf, "general", param.CONF_NAME)
                # Handle post conditions for group only if pre condition passed
                postConditionValue = True
                if group.POST_CONDITION:
                    postConditionValue = _handleGroupCondition(fconf, group.POST_CONDITION, postConditionValue)
                    # Handle post condition match for group
                    if postConditionValue != group.POST_CONDITION_MATCH:
                        logging.error("The group condition (%s) returned: %s, which differs from the excpeted output: %s"%\
                                      (group.GROUP_NAME, postConditionValue, group.POST_CONDITION_MATCH))
                        raise ValueError(output_messages.ERR_EXP_GROUP_VALIDATION_ANS_FILE%\
                                         (group.GROUP_NAME, postConditionValue, group.POST_CONDITION_MATCH))
                    else:
                        logging.debug("condition (%s) passed" % group.POST_CONDITION)
                else:
                    logging.debug("no post condition check for group %s" % group.GROUP_NAME)
            else:
                logging.debug("skipping params group %s since value of group validation is %s" % (group.GROUP_NAME, preConditionValue))
    except Exception as e:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_HANDLE_ANSWER_FILE%(e))
def _getanswerfilepath():
    """Return a timestamped answer-file path under $HOME, or None.

    Appends an informational (or failure) message to controller.MESSAGES
    either way.
    """
    path = None
    msg = "Could not find a suitable path on which to create the answerfile"
    ts = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    p = os.path.expanduser("~/")
    if os.access(p, os.W_OK):
        path = os.path.abspath(os.path.join(p, "packstack-answers-%s.txt"%ts))
        msg = "A new answerfile was created in: %s" % path
    controller.MESSAGES.append(msg)
    logging.info(msg)
    return path
def _handleInteractiveParams():
    """Prompt the user for every parameter group, honoring pre/post conditions.

    A group whose post-condition fails is cleared and re-prompted. After all
    groups are collected, a summary is displayed and an answer file is
    written when a writable path is available.
    """
    try:
        logging.debug("Groups: %s" % ', '.join([x.GROUP_NAME for x in controller.getAllGroups()]))
        for group in controller.getAllGroups():
            preConditionValue = True
            logging.debug("going over group %s" % group.GROUP_NAME)
            # If pre_condition is set, get Value
            if group.PRE_CONDITION:
                preConditionValue = _getConditionValue(group.PRE_CONDITION)
            inputLoop = True
            # If we have a match, i.e. condition returned True, go over all params in the group
            logging.info("Comparing pre-conditions; condition: '%s', and match: '%s'" % (preConditionValue, group.PRE_CONDITION_MATCH))
            if preConditionValue == group.PRE_CONDITION_MATCH:
                while inputLoop:
                    for param in group.parameters.itervalues():
                        if not param.CONDITION:
                            input_param(param)
                            #update password list, so we know to mask them
                            _updateMaskedValueSet()
                    postConditionValue = True
                    # If group has a post condition, we check it after we get the input from
                    # all the params in the group. if the condition returns False, we loop over the group again
                    if group.POST_CONDITION:
                        postConditionValue = _getConditionValue(group.POST_CONDITION)
                        if postConditionValue == group.POST_CONDITION_MATCH:
                            inputLoop = False
                        else:
                            #we clear the value of all params in the group
                            #in order to re-input them by the user
                            for param in group.parameters.itervalues():
                                if controller.CONF.has_key(param.CONF_NAME):
                                    del controller.CONF[param.CONF_NAME]
                                if commandLineValues.has_key(param.CONF_NAME):
                                    del commandLineValues[param.CONF_NAME]
                    else:
                        inputLoop = False
            else:
                logging.debug("no post condition check for group %s" % group.GROUP_NAME)
        path = _getanswerfilepath()
        _displaySummary()
        if path:
            generateAnswerFile(path)
    except KeyboardInterrupt:
        logging.error("keyboard interrupt caught")
        raise Exception(output_messages.ERR_EXP_KEYBOARD_INTERRUPT)
    except Exception:
        logging.error(traceback.format_exc())
        raise
    except:
        # NOTE(review): this bare except is only reachable for old-style
        # (non-Exception) raises under Python 2 -- confirm it is still needed.
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_HANDLE_PARAMS)
def _handleParams(configFile):
    """Collect all parameters, from an answer file when given, else interactively."""
    _addDefaultsToMaskedValueSet()
    if not configFile:
        _handleInteractiveParams()
    else:
        _handleAnswerFileParams(configFile)
def _getConditionValue(matchMember):
    """Evaluate a group condition given as a callable or a conf parameter name.

    String conditions trigger an interactive prompt for that parameter when
    it has not been collected yet.

    :raises TypeError: for unsupported condition types
    """
    returnValue = False
    if type(matchMember) == types.FunctionType:
        returnValue = matchMember(controller.CONF)
    elif type(matchMember) == types.StringType:
        #we assume that if we get a string as a member it is the name
        #of a member of conf_params
        if not controller.CONF.has_key(matchMember):
            param = controller.getParamByName(matchMember)
            input_param(param)
        returnValue = controller.CONF[matchMember]
    else:
        raise TypeError("%s type (%s) is not supported"%(matchMember, type(matchMember)))
    return returnValue
def _displaySummary():
    """Show the collected parameter values and ask the user to confirm.

    If the user rejects: non-password values become new defaults, password
    values are dropped from both the mask set and CONF, and the interactive
    flow restarts via _handleParams(None).
    """
    print output_messages.INFO_DSPLY_PARAMS
    print "=" * (len(output_messages.INFO_DSPLY_PARAMS) - 1)
    logging.info("*** User input summary ***")
    for group in controller.getAllGroups():
        for param in group.parameters.itervalues():
            if not param.USE_DEFAULT and controller.CONF.has_key(param.CONF_NAME):
                cmdOption = param.CMD_OPTION
                l = 30 - len(cmdOption)
                maskParam = param.MASK_INPUT
                # Only call mask on a value if the param has MASK_INPUT set to True
                if maskParam:
                    logging.info("%s: %s" % (cmdOption, mask(controller.CONF[param.CONF_NAME])))
                    print "%s:" % (cmdOption) + " " * l + mask(controller.CONF[param.CONF_NAME])
                else:
                    # Otherwise, log & display it as it is
                    logging.info("%s: %s" % (cmdOption, str(controller.CONF[param.CONF_NAME])))
                    print "%s:" % (cmdOption) + " " * l + str(controller.CONF[param.CONF_NAME])
    logging.info("*** User input summary ***")
    answer = _askYesNo(output_messages.INFO_USE_PARAMS)
    if not answer:
        logging.debug("user chose to re-enter the user parameters")
        for group in controller.getAllGroups():
            for param in group.parameters.itervalues():
                if controller.CONF.has_key(param.CONF_NAME):
                    if not param.MASK_INPUT:
                        param.DEFAULT_VALUE = controller.CONF[param.CONF_NAME]
                    # Remove the string from mask_value_set in order
                    # to remove values that might be over overwritten.
                    removeMaskString(controller.CONF[param.CONF_NAME])
                    del controller.CONF[param.CONF_NAME]
                    if commandLineValues.has_key(param.CONF_NAME):
                        del commandLineValues[param.CONF_NAME]
        print ""
        logging.debug("calling handleParams in interactive mode")
        return _handleParams(None)
    else:
        logging.debug("user chose to accept user parameters")
def _printAdditionalMessages():
    """Print and log any informational messages accumulated during setup."""
    if len(controller.MESSAGES) > 0:
        print "\n",output_messages.INFO_ADDTIONAL_MSG
        for msg in controller.MESSAGES:
            logging.info(output_messages.INFO_ADDTIONAL_MSG_BULLET%(msg))
            print output_messages.INFO_ADDTIONAL_MSG_BULLET%(msg)
def _addFinalInfoMsg():
    """
    add info msg to the user finalizing the
    successful install, pointing at the log file location
    """
    controller.MESSAGES.append(output_messages.INFO_LOG_FILE_PATH%(logFile))
def _summaryParamsToLog():
    """Write every collected parameter to the debug log, with secrets masked."""
    if len(controller.CONF) > 0:
        logging.debug("*** The following params were used as user input:")
        for group in controller.getAllGroups():
            for param in group.parameters.itervalues():
                if controller.CONF.has_key(param.CONF_NAME):
                    maskedValue = mask(controller.CONF[param.CONF_NAME])
                    logging.debug("%s: %s" % (param.CMD_OPTION, maskedValue ))
def runSequences():
    """Execute all registered plugin sequences via the controller."""
    controller.runAllSequences()
def _main(configFile=None):
    """Top-level flow: collect parameters, then run the plugin sequences.

    :param configFile: optional answer file path; interactive when None
    """
    logging.debug("Entered main(configFile='%s')"%(configFile))
    print output_messages.INFO_HEADER
    # Get parameters
    _handleParams(configFile)
    # Update masked_value_list with user input values
    _updateMaskedValueSet()
    # Print masked conf
    logging.debug(mask(controller.CONF))
    # Start configuration stage
    logging.debug("Entered Configuration stage")
    print "\n",output_messages.INFO_INSTALL
    # Initialize Sequences
    initPluginsSequences()
    # Run main setup logic
    runSequences()
    # Lock rhevm version
    #_lockRpmVersion()
    # Print info
    _addFinalInfoMsg()
    print output_messages.INFO_INSTALL_SUCCESS
def remove_remote_var_dirs():
    """
    Removes the temp directories on remote hosts,
    doesn't remove data on localhost

    Failures are logged and reported via controller.MESSAGES rather than
    aborting cleanup of the remaining hosts.
    """
    for host in gethostlist(controller.CONF):
        try:
            host_dir = controller.temp_map[host]
        except KeyError:
            # Nothing was added to this host yet, so we have nothing to delete
            continue
        logging.info(output_messages.INFO_REMOVE_REMOTE_VAR % (host_dir, host))
        server = utils.ScriptRunner(host)
        server.append('rm -rf %s' % host_dir)
        try:
            server.execute()
        except Exception, e:
            msg = output_messages.ERR_REMOVE_REMOTE_VAR % (host_dir, host)
            logging.error(msg)
            logging.exception(e)
            controller.MESSAGES.append(utils.color_text(msg, 'red'))
def generateAnswerFile(outputFile, overrides={}):
sep = os.linesep
fmt = ("%(comment)s%(separator)s%(conf_name)s=%(default_value)s"
"%(separator)s")
outputFile = os.path.expanduser(outputFile)
# Remove the answer file so it can be recreated as the current user with
# the mode -rw-------
if os.path.exists(outputFile):
os.remove(outputFile)
fd = os.open(outputFile, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0600)
with os.fdopen(fd, "w") as ans_file:
ans_file.write("[general]%s" % os.linesep)
for group in controller.getAllGroups():
for param in group.parameters.itervalues():
comm = param.USAGE or ''
comm = textwrap.fill(comm,
initial_indent='%s# ' % sep,
subsequent_indent='# ',
break_long_words=False)
value = controller.CONF.get(param.CONF_NAME,
param.DEFAULT_VALUE)
args = {'comment': comm,
'separator': sep,
'default_value': overrides.get(param.CONF_NAME, value),
'conf_name': param.CONF_NAME}
ans_file.write(fmt % args)
def single_step_aio_install(options):
    """Install an All-in-One OpenStack host on this machine.

    Fills in sensible defaults for any relevant option the user did not
    already set on the command line, then delegates to
    single_step_install().
    """
    options.install_hosts = utils.get_localhost_ip()
    # (attribute, lazy default) pairs.  The lambdas keep device_from_ip()
    # from being invoked unless its value is actually needed, mirroring the
    # original per-option guards.
    aio_defaults = (
        ('os_swift_install', lambda: "y"),
        ('nagios_install', lambda: "y"),
        ('novanetwork_pubif',
         lambda: utils.device_from_ip(options.install_hosts)),
        ('novacompute_privif', lambda: "lo"),
        ('novanetwork_privif', lambda: "lo"),
    )
    for attr, make_default in aio_defaults:
        if not getattr(options, attr):
            setattr(options, attr, make_default())
    # Unless neutron is explicitly disabled, also provision a basic demo
    # network and an external OVS bridge — but never override an explicit
    # user choice.
    if options.os_neutron_install != "n":
        if not options.provision_demo:
            options.provision_demo = "y"
        if not options.provision_all_in_one_ovs_bridge:
            options.provision_all_in_one_ovs_bridge = "y"
    single_step_install(options)
def single_step_install(options):
    """Generate an answer file for the given hosts and run the install.

    The first host in --install-hosts becomes the controller (every *_HOST
    directive is pointed at it); any remaining hosts become compute nodes.
    Command line values override the generated defaults.
    """
    answerfilepath = _getanswerfilepath()
    if not answerfilepath:
        _printAdditionalMessages()
        return
    host_list = [h.strip() for h in options.install_hosts.split(',')]
    overrides = {}
    # Point every *_HOST directive at the controller (first host).
    for group in controller.getAllGroups():
        for param in group.parameters.itervalues():
            if "_HOST" in param.CONF_NAME:
                overrides[param.CONF_NAME] = host_list[0]
    # Remaining hosts, if any, are the compute nodes.
    if len(host_list) > 1:
        overrides["CONFIG_NOVA_COMPUTE_HOSTS"] = ','.join(host_list[1:])
    # Command line options take precedence over the generated defaults.
    _set_command_line_values(options)
    overrides.update(commandLineValues)
    generateAnswerFile(answerfilepath, overrides)
    _main(answerfilepath)
def initCmdLineParser():
    """
    Initiate the optparse object, add all the groups and general command line flags
    and returns the optparse object

    Besides the fixed flags below, one option is added per plugin parameter
    (grouped by plugin group) unless the parameter opts out via USE_DEFAULT.
    """
    # Init parser and all general flags
    logging.debug("initiating command line option parser")
    usage = "usage: %prog [options] [--help]"
    parser = OptionParser(usage)
    parser.add_option("--gen-answer-file", help="Generate a template of an answer file, using this option excludes all other options")
    parser.add_option("--answer-file", help="Runs the configuration in non-interactive mode, extracting all information from the \
                                            configuration file. using this option excludes all other options")
    parser.add_option("--install-hosts", help="Install on a set of hosts in a single step. The format should be a comma separated list "
                                              "of hosts, the first is setup as a controller, and the others are setup as compute nodes."
                                              "if only a single host is supplied then it is setup as an all in one installation. An answerfile "
                                              "will also be generated and should be used if Packstack needs to be run a second time ")
    parser.add_option("--allinone", action="store_true", help="Shorthand for --install-hosts=<local ipaddr> --novanetwork-pubif=<dev> "
                                                              "--novacompute-privif=lo --novanetwork-privif=lo --os-swift-install=y --nagios-install=y "
                                                              ", this option can be used to install an all in one OpenStack on this host")
    parser.add_option("-o", "--options", action="store_true", dest="options", help="Print details on options available in answer file(rst format)")
    parser.add_option("-d", "--debug", action="store_true", default=False, help="Enable debug in logging")
    # For each group, create a group option
    for group in controller.getAllGroups():
        groupParser = OptionGroup(parser, group.DESCRIPTION)
        for param in group.parameters.itervalues():
            cmdOption = param.CMD_OPTION
            paramUsage = param.USAGE
            optionsList = param.OPTION_LIST
            useDefault = param.USE_DEFAULT
            # Parameters flagged USE_DEFAULT are not exposed on the CLI.
            if not useDefault:
                groupParser.add_option("--%s" % cmdOption, help=paramUsage)
        # Add group parser to main parser
        parser.add_option_group(groupParser)
    return parser
def printOptions():
    """
    print and document the available options to the answer file (rst format)

    Each plugin group becomes an rst section (title + underline) listing its
    parameters' CONF_NAMEs, usage text and allowed values.
    """
    # For each group, create a group option
    for group in controller.getAllGroups():
        # rst section header: title followed by a matching underline.
        print "%s" % group.DESCRIPTION
        print "-" * len(group.DESCRIPTION)
        print
        for param in group.parameters.itervalues():
            cmdOption = param.CONF_NAME
            paramUsage = param.USAGE
            optionsList = param.OPTION_LIST or ""
            print "%s : %s %s"%(("**%s**"%str(cmdOption)).ljust(30), paramUsage, optionsList)
        print
def plugin_compare(x, y):
    """cmp()-style comparison used to sort the plugin file list.

    Plugin files are named ``<name>_NNN.py``; ordering is by the numeric
    NNN suffix.  Returns a negative, zero or positive int.

    Raises AttributeError if either name lacks the ``_NNN`` part (callers
    only pass names matching that convention).
    """
    # Raw strings: the previous literals used "\_", an invalid string
    # escape (a warning on modern Pythons); in regex terms '_' needs no
    # escaping at all.
    x_match = re.search(r".+_(\d\d\d)", x)
    x_cmp = x_match.group(1)
    y_match = re.search(r".+_(\d\d\d)", y)
    y_cmp = y_match.group(1)
    return int(x_cmp) - int(y_cmp)
def loadPlugins():
    """
    Load All plugins from ./plugins

    Plugin files are named ``<name>_NNN.py``; they are imported in
    ascending NNN order, validated with checkPlugin() and registered with
    the controller.  Any failure aborts the run.
    """
    sys.path.append(basedefs.DIR_PLUGINS)
    sys.path.append(basedefs.DIR_MODULES)
    # Skip private/helper files such as __init__.py.
    fileList = [f for f in os.listdir(basedefs.DIR_PLUGINS) if f[0] != "_"]
    # NOTE(review): a non-plugin file in DIR_PLUGINS (no _NNN suffix) would
    # make plugin_compare raise during this sort — confirm the directory
    # only ever holds plugin modules.
    fileList = sorted(fileList, cmp=plugin_compare)
    for item in fileList:
        # Looking for files that end with ###.py, example: a_plugin_100.py
        match = re.search(r"^(.+_\d\d\d)\.py$", item)
        if match:
            try:
                moduleToLoad = match.group(1)
                logging.debug("importing module %s, from file %s", moduleToLoad, item)
                moduleobj = __import__(moduleToLoad)
                moduleobj.__file__ = os.path.join(basedefs.DIR_PLUGINS, item)
                # Expose the module at file scope so plugins can reference
                # one another by name.
                globals()[moduleToLoad] = moduleobj
                checkPlugin(moduleobj)
                controller.addPlugin(moduleobj)
            except Exception:
                # Was a bare ``except:``, which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception.
                logging.error("Failed to load plugin from file %s", item)
                logging.error(traceback.format_exc())
                raise Exception("Failed to load plugin from file %s" % item)
def checkPlugin(plugin):
    """Validate that a plugin module exposes the required entry points.

    Every plugin must provide initConfig() and initSequences().

    Raises ImportError naming the plugin and the missing function.
    """
    for funcName in ['initConfig','initSequences']:
        if not hasattr(plugin, funcName):
            # Use the module's own name in the message; the previous
            # ``plugin.__class__`` always rendered as the generic module
            # type, which identified nothing.
            raise ImportError("Plugin %s does not contain the %s function"
                              % (getattr(plugin, '__name__', plugin), funcName))
def countCmdLineFlags(options, flag):
    """
    counts all command line flags that were supplied, excluding the supplied flag name

    --debug is also excluded from the count.  Only truthy option values are
    counted.
    """
    counter = 0
    # make sure only flag was supplied
    for key, value in options.__dict__.items():
        # Bug fix: the original used the bare name ``next`` here, which is a
        # no-op expression, not a loop-control statement; it only worked by
        # accident of the elif chain.  ``continue`` states the intent.
        if key == flag:
            continue
        # Do not count --debug
        elif key == 'debug':
            continue
        # If anything but flag was called, increment
        elif value:
            counter += 1
    return counter
def validateSingleFlag(options, flag):
    """Raise FlagValidationError unless ``flag`` was the only option used.

    --debug is tolerated alongside the flag (see countCmdLineFlags).
    """
    if countCmdLineFlags(options, flag) > 0:
        # Report the flag in CLI spelling (dashes, leading '--').
        cli_name = "--%s" % flag.replace("_", "-")
        raise FlagValidationError(output_messages.ERR_ONLY_1_FLAG % cli_name)
def setProvisioningDefaults():
    """Ensure the provisioning parameters have a value in controller.CONF.

    For each CONFIG_PROVISION_* parameter, keep any value already present
    and otherwise store the parameter's declared default.
    """
    for suffix in ('DEMO', 'TEMPEST', 'ALL_IN_ONE_OVS_BRIDGE'):
        param = controller.getParamByName('CONFIG_PROVISION_' + suffix)
        controller.CONF[param.CONF_NAME] = controller.CONF.get(
            param.CONF_NAME, param.DEFAULT_VALUE)
def initPluginsConfig():
    """Ask every loaded plugin to register its configuration parameters."""
    for module in controller.getAllPlugins():
        module.initConfig(controller)
def initPluginsSequences():
    """Seed provisioning defaults, then let each plugin register its sequences."""
    setProvisioningDefaults()
    for module in controller.getAllPlugins():
        module.initSequences(controller)
def _set_command_line_values(options):
    """Copy truthy CLI option values into commandLineValues, keyed by CONF_NAME.

    optparse stores options with underscores; parameters declare their CLI
    spelling with dashes, so the name is translated before matching.
    """
    for opt_name, opt_value in options.__dict__.items():
        if not opt_value:
            continue
        cmd_option = opt_name.replace("_", "-")
        for group in controller.getAllGroups():
            matches = group.search("CMD_OPTION", cmd_option)
            if matches:
                commandLineValues[matches[0].CONF_NAME] = opt_value
def main():
    """Entry point: parse the command line and dispatch to the right mode.

    Modes (mutually exclusive): --options (print docs), --gen-answer-file,
    --allinone, --install-hosts, or a normal run driven by --answer-file /
    individual CLI options.  Errors are reported and exit with status 1;
    remote temp dirs are always cleaned up.
    """
    try:
        # Load Plugins
        loadPlugins()
        initPluginsConfig()
        optParser = initCmdLineParser()
        # Do the actual command line parsing
        # Try/Except are here to catch the silly sys.exit(0) when calling rhevm-setup --help
        (options, args) = optParser.parse_args()
        if options.options:
            printOptions()
            raise SystemExit
        # Initialize logging
        initLogging (options.debug)
        # Parse parameters
        # NOTE(review): runConfiguration is assigned but never read here.
        runConfiguration = True
        confFile = None
        # If --gen-answer-file was supplied, do not run main
        if options.gen_answer_file:
            # Make sure only --gen-answer-file was supplied
            validateSingleFlag(options, "gen_answer_file")
            generateAnswerFile(options.gen_answer_file)
        # Are we installing an all in one
        elif options.allinone:
            # --allinone implies its own generated answer file, so an
            # explicit --answer-file would be contradictory.
            if getattr(options, 'answer_file', None):
                msg = ('Please use either --allinone or --answer-file, '
                       'but not both.')
                raise FlagValidationError(msg)
            single_step_aio_install(options)
        # Are we installing in a single step
        elif options.install_hosts:
            single_step_install(options)
        # Otherwise, run main()
        else:
            # Make sure only --answer-file was supplied
            if options.answer_file:
                validateSingleFlag(options, "answer_file")
                confFile = os.path.expanduser(options.answer_file)
                if not os.path.exists(confFile):
                    raise Exception(output_messages.ERR_NO_ANSWER_FILE % confFile)
            else:
                _set_command_line_values(options)
            _main(confFile)
    except FlagValidationError, ex:
        optParser.error(str(ex))
    except Exception as e:
        logging.error(traceback.format_exc())
        print
        print utils.color_text("ERROR : " + str(e), 'red')
        print output_messages.ERR_CHECK_LOG_FILE_FOR_MORE_INFO%(logFile)
        sys.exit(1)
    finally:
        # Cleanup and summary run whether the install succeeded or not.
        remove_remote_var_dirs()
        # Always print user params to log
        _printAdditionalMessages()
        _summaryParamsToLog()
# Script entry point: delegate to main() when executed directly.
if __name__ == "__main__":
    main()
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Piecewise Rational Quadratic Spline bijector."""
import collections
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
def _ensure_at_least_1d(t):
  """Converts `t` to a Tensor of rank at least 1."""
  tensor = tf.convert_to_tensor(t)
  # Broadcasting against a length-1 zeros vector lifts a scalar to shape
  # [1] while leaving rank >= 1 inputs (values and shape) unchanged.
  return tensor + tf.zeros([1], dtype=tensor.dtype)
def _padded(t, lhs, rhs=None):
  """Left pads and optionally right pads the innermost axis of `t`."""
  lhs = tf.convert_to_tensor(lhs, dtype=t.dtype)
  # Zero padding on every axis except the last; the innermost axis gets the
  # requested constant padding.
  zero_pads = tf.zeros([tf.rank(t) - 1, 2], dtype=tf.int32)
  result = tf.pad(
      t,
      paddings=tf.concat([zero_pads, [[1, 0]]], axis=0),
      constant_values=lhs)
  if rhs is None:
    return result
  rhs = tf.convert_to_tensor(rhs, dtype=t.dtype)
  return tf.pad(
      result,
      paddings=tf.concat([zero_pads, [[0, 1]]], axis=0),
      constant_values=rhs)
def _knot_positions(bin_sizes, range_min):
  """Turns bin sizes into absolute knot positions, starting at `range_min`."""
  # Cumulative sums give the interior/terminal knots; the first knot
  # (range_min itself) is prepended by the left pad.
  positions = tf.cumsum(bin_sizes, axis=-1) + range_min
  return _padded(positions, lhs=range_min)
# Intermediate quantities shared by forward/inverse/log-det computations:
# the out-of-bounds mask, the bracketing knot coordinates (x_k, y_k), the
# slopes at those knots (d_k, d_kp1), the bin height/width (h_k, w_k) and
# the secant slope s_k = h_k / w_k.
_SplineShared = collections.namedtuple(
    'SplineShared', 'out_of_bounds,x_k,y_k,d_k,d_kp1,h_k,w_k,s_k')
class RationalQuadraticSpline(bijector.AutoCompositeTensorBijector):
  """A piecewise rational quadratic spline, as developed in [1].
  This transformation represents a monotonically increasing piecewise rational
  quadratic function. Outside of the bounds of `knot_x`/`knot_y`, the transform
  behaves as an identity function.
  Typically this bijector will be used as part of a chain, with splines for
  trailing `x` dimensions conditioned on some of the earlier `x` dimensions, and
  with the inverse then solved first for unconditioned dimensions, then using
  conditioning derived from those inverses, and so forth. For example, if we
  split a 15-D `xs` vector into 3 components, we may implement a forward and
  inverse as follows:
  ```python
  nsplits = 3
  class SplineParams(tf.Module):
    def __init__(self, nbins=32, interval_width=2, range_min=-1,
                 min_bin_width=1e-3, min_slope=1e-3):
      self._nbins = nbins
      self._interval_width = interval_width  # Sum of bin widths.
      self._range_min = range_min  # Position of first knot.
      self._min_bin_width = min_bin_width  # Bin width lower bound.
      self._min_slope = min_slope  # Lower bound for slopes at internal knots.
      self._built = False
      self._bin_widths = None
      self._bin_heights = None
      self._knot_slopes = None
    def __call__(self, x, nunits):
      if not self._built:
        def _bin_positions(x):
          out_shape = tf.concat((tf.shape(x)[:-1], (nunits, self._nbins)), 0)
          x = tf.reshape(x, out_shape)
          return tf.math.softmax(x, axis=-1) * (
              self._interval_width - self._nbins * self._min_bin_width
          ) + self._min_bin_width
        def _slopes(x):
          out_shape = tf.concat((
              tf.shape(x)[:-1], (nunits, self._nbins - 1)), 0)
          x = tf.reshape(x, out_shape)
          return tf.math.softplus(x) + self._min_slope
        self._bin_widths = tf.keras.layers.Dense(
            nunits * self._nbins, activation=_bin_positions, name='w')
        self._bin_heights = tf.keras.layers.Dense(
            nunits * self._nbins, activation=_bin_positions, name='h')
        self._knot_slopes = tf.keras.layers.Dense(
            nunits * (self._nbins - 1), activation=_slopes, name='s')
        self._built = True
      return tfb.RationalQuadraticSpline(
          bin_widths=self._bin_widths(x),
          bin_heights=self._bin_heights(x),
          knot_slopes=self._knot_slopes(x),
          range_min=self._range_min)
  xs = np.random.randn(3, 15).astype(np.float32)  # Keras won't Dense(.)(vec).
  splines = [SplineParams() for _ in range(nsplits)]
  def spline_flow():
    stack = tfb.Identity()
    for i in range(nsplits):
      stack = tfb.RealNVP(5 * i, bijector_fn=splines[i])(stack)
    return stack
  ys = spline_flow().forward(xs)
  ys_inv = spline_flow().inverse(ys)  # ys_inv ~= xs
  ```
  For a one-at-a-time autoregressive flow as in [1], it would be profitable to
  implement a mask over `xs` to parallelize either the inverse or the forward
  pass and implement the other using a `tf.while_loop`. See
  `tfp.bijectors.MaskedAutoregressiveFlow` for support doing so (paired with
  `tfp.bijectors.Invert` depending which direction should be parallel).
  #### References
  [1]: Conor Durkan, Artur Bekasov, Iain Murray, George Papamakarios. Neural
       Spline Flows. _arXiv preprint arXiv:1906.04032_, 2019.
       https://arxiv.org/abs/1906.04032
  """
  def __init__(self,
               bin_widths,
               bin_heights,
               knot_slopes,
               range_min=-1,
               validate_args=False,
               name=None):
    """Construct a new RationalQuadraticSpline bijector.
    For each argument, the innermost axis indexes bins/knots and batch axes
    index axes of `x`/`y` spaces. A `RationalQuadraticSpline` with a separate
    transform for each of three dimensions might have `bin_widths` shaped
    `[3, 32]`. To use the same spline for each of `x`'s three dimensions we may
    broadcast against `x` and use a `bin_widths` parameter shaped `[32]`.
    Parameters will be broadcast against each other and against the input
    `x`/`y`s, so if we want fixed slopes, we can use kwarg `knot_slopes=1`.
    A typical recipe for acquiring compatible bin widths and heights would be:
    ```python
    nbins = unconstrained_vector.shape[-1]
    range_min, range_max, min_bin_size = -1, 1, 1e-2
    scale = range_max - range_min - nbins * min_bin_size
    bin_widths = tf.math.softmax(unconstrained_vector) * scale + min_bin_size
    ```
    Args:
      bin_widths: The widths of the spans between subsequent knot `x` positions,
        a floating point `Tensor`. Must be positive, and at least 1-D. Innermost
        axis must sum to the same value as `bin_heights`. The knot `x` positions
        will be a first at `range_min`, followed by knots at `range_min +
        cumsum(bin_widths, axis=-1)`.
      bin_heights: The heights of the spans between subsequent knot `y`
        positions, a floating point `Tensor`. Must be positive, and at least
        1-D. Innermost axis must sum to the same value as `bin_widths`. The knot
        `y` positions will be a first at `range_min`, followed by knots at
        `range_min + cumsum(bin_heights, axis=-1)`.
      knot_slopes: The slope of the spline at each knot, a floating point
        `Tensor`. Must be positive. `1`s are implicitly padded for the first and
        last implicit knots corresponding to `range_min` and `range_min +
        sum(bin_widths, axis=-1)`. Innermost axis size should be 1 less than
        that of `bin_widths`/`bin_heights`, or 1 for broadcasting.
      range_min: The `x`/`y` position of the first knot, which has implicit
        slope `1`. `range_max` is implicit, and can be computed as `range_min +
        sum(bin_widths, axis=-1)`. Scalar floating point `Tensor`.
      validate_args: Toggles argument validation (can hurt performance).
      name: Optional name scope for associated ops. (Defaults to
        `'RationalQuadraticSpline'`).
    """
    parameters = dict(locals())
    with tf.name_scope(name or 'RationalQuadraticSpline') as name:
      # All four parameters share one common dtype (float32 by default).
      dtype = dtype_util.common_dtype(
          [bin_widths, bin_heights, knot_slopes, range_min],
          dtype_hint=tf.float32)
      self._bin_widths = tensor_util.convert_nonref_to_tensor(
          bin_widths, dtype=dtype, name='bin_widths')
      self._bin_heights = tensor_util.convert_nonref_to_tensor(
          bin_heights, dtype=dtype, name='bin_heights')
      self._knot_slopes = tensor_util.convert_nonref_to_tensor(
          knot_slopes, dtype=dtype, name='knot_slopes')
      self._range_min = tensor_util.convert_nonref_to_tensor(
          range_min, dtype=dtype, name='range_min')
      super(RationalQuadraticSpline, self).__init__(
          dtype=dtype,
          forward_min_event_ndims=0,
          validate_args=validate_args,
          parameters=parameters,
          name=name)
  @classmethod
  def _parameter_properties(cls, dtype):
    return dict(
        bin_widths=parameter_properties.ParameterProperties(
            event_ndims=1,
            shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,
            default_constraining_bijector_fn=parameter_properties
            .BIJECTOR_NOT_IMPLEMENTED),
        bin_heights=parameter_properties.ParameterProperties(
            event_ndims=1,
            shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,
            default_constraining_bijector_fn=parameter_properties
            .BIJECTOR_NOT_IMPLEMENTED),
        knot_slopes=parameter_properties.ParameterProperties(
            event_ndims=1,
            shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,
            default_constraining_bijector_fn=(
                lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
        range_min=parameter_properties.ParameterProperties(
            shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,))
  @property
  def bin_widths(self):
    return self._bin_widths
  @property
  def bin_heights(self):
    return self._bin_heights
  @property
  def knot_slopes(self):
    return self._knot_slopes
  @property
  def range_min(self):
    return self._range_min
  @classmethod
  def _is_increasing(cls):
    # The spline is monotonically increasing by construction (positive bin
    # sizes and slopes).
    return True
  def _compute_shared(self, x=None, y=None):
    """Captures shared computations across forward/inverse/logdet.
    Only one of `x` or `y` should be specified.
    Args:
      x: The `x` values we will search for.
      y: The `y` values we will search for.
    Returns:
      data: A namedtuple with named fields containing shared computations.
    """
    # Exactly one of x / y must be given; is_x records which space we
    # search in (x-knots for forward, y-knots for inverse).
    assert (x is None) != (y is None)
    is_x = x is not None
    range_min = tf.convert_to_tensor(self.range_min, name='range_min')
    kx = _knot_positions(self.bin_widths, range_min)
    ky = _knot_positions(self.bin_heights, range_min)
    # Boundary knots get implicit slope 1 (identity continuation).
    kd = _padded(_ensure_at_least_1d(self.knot_slopes), lhs=1, rhs=1)
    kx_or_ky = kx if is_x else ky
    kx_or_ky_min = kx_or_ky[..., 0]
    kx_or_ky_max = kx_or_ky[..., -1]
    x_or_y = x if is_x else y
    # Values outside the knot range pass through the identity; clamp them
    # onto the first knot so the gathers below stay in-range.
    out_of_bounds = (x_or_y <= kx_or_ky_min) | (x_or_y >= kx_or_ky_max)
    x_or_y = tf.where(out_of_bounds, kx_or_ky_min, x_or_y)
    shape = functools.reduce(
        tf.broadcast_dynamic_shape,
        (
            tf.shape(x_or_y[..., tf.newaxis]),  # Add a n_knots dim.
            tf.shape(kx),
            tf.shape(ky),
            tf.shape(kd)))
    bc_x_or_y = tf.broadcast_to(x_or_y, shape[:-1])
    bc_kx = tf.broadcast_to(kx, shape)
    bc_ky = tf.broadcast_to(ky, shape)
    bc_kd = tf.broadcast_to(kd, shape)
    bc_kx_or_ky = bc_kx if is_x else bc_ky
    # Index of the bin containing each value: the rightmost knot <= value,
    # clamped to 0 for the values moved onto the first knot above.
    indices = tf.maximum(
        tf.zeros([], dtype=tf.int64),
        tf.searchsorted(
            bc_kx_or_ky[..., :-1],
            bc_x_or_y[..., tf.newaxis],
            side='right',
            out_type=tf.int64) - 1)
    def gather_squeeze(params, indices):
      # Gather along the knots axis and drop the singleton index dim.
      rank = tensorshape_util.rank(indices.shape)
      if rank is None:
        raise ValueError('`indices` must have statically known rank.')
      return tf.gather(params, indices, axis=-1, batch_dims=rank - 1)[..., 0]
    x_k = gather_squeeze(bc_kx, indices)
    x_kp1 = gather_squeeze(bc_kx, indices + 1)
    y_k = gather_squeeze(bc_ky, indices)
    y_kp1 = gather_squeeze(bc_ky, indices + 1)
    d_k = gather_squeeze(bc_kd, indices)
    d_kp1 = gather_squeeze(bc_kd, indices + 1)
    h_k = y_kp1 - y_k
    w_k = x_kp1 - x_k
    # Secant slope of the bin.
    s_k = h_k / w_k
    return _SplineShared(
        out_of_bounds=out_of_bounds,
        x_k=x_k,
        y_k=y_k,
        d_k=d_k,
        d_kp1=d_kp1,
        h_k=h_k,
        w_k=w_k,
        s_k=s_k)
  def _forward(self, x):
    """Compute the forward transformation (Appendix A.1)."""
    d = self._compute_shared(x=x)
    # Relative position of x inside its bin, in [0, 1].
    relx = (x - d.x_k) / d.w_k
    spline_val = (
        d.y_k + ((d.h_k * (d.s_k * relx**2 + d.d_k * relx * (1 - relx))) /
                 (d.s_k + (d.d_kp1 + d.d_k - 2 * d.s_k) * relx * (1 - relx))))
    # Identity outside the spline's support.
    y_val = tf.where(d.out_of_bounds, x, spline_val)
    return y_val
  def _inverse(self, y):
    """Compute the inverse transformation (Appendix A.3)."""
    d = self._compute_shared(y=y)
    rely = tf.where(d.out_of_bounds, tf.zeros([], dtype=y.dtype), y - d.y_k)
    term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)
    # These terms are the a, b, c terms of the quadratic formula.
    a = d.h_k * (d.s_k - d.d_k) + term2
    b = d.h_k * d.d_k - term2
    c = -d.s_k * rely
    # The expression used here has better numerical behavior for small 4*a*c.
    relx = tf.where(
        tf.equal(rely, 0), tf.zeros([], dtype=a.dtype),
        (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))
    return tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)
  def _forward_log_det_jacobian(self, x):
    """Compute the forward derivative (Appendix A.2)."""
    d = self._compute_shared(x=x)
    relx = (x - d.x_k) / d.w_k
    # Any in-bounds value works for the out-of-bounds lanes; 0.5 avoids
    # non-finite intermediates. The result is masked to 0 below.
    relx = tf.where(d.out_of_bounds, tf.constant(.5, x.dtype), relx)
    grad = (
        2 * tf.math.log(d.s_k) +
        tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) +  # newln
                    d.d_k * (1 - relx)**2) -
        2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *
                        (1 - relx) + d.s_k))
    # log|dy/dx| of the identity is 0 outside the spline's support.
    return tf.where(d.out_of_bounds, tf.zeros([], dtype=x.dtype), grad)
  def _parameter_control_dependencies(self, is_init):
    """Validate parameters."""
    bw, bh, kd = None, None, None
    # Static (graph-build-time) shape checks first; dynamic assertions are
    # collected below and returned for runtime evaluation.
    try:
      shape = tf.broadcast_static_shape(self.bin_widths.shape,
                                        self.bin_heights.shape)
    except ValueError as e:
      raise ValueError('`bin_widths`, `bin_heights` must broadcast: {}'.format(
          str(e)))
    bin_sizes_shape = shape
    try:
      shape = tf.broadcast_static_shape(shape[:-1], self.knot_slopes.shape[:-1])
    except ValueError as e:
      raise ValueError(
          '`bin_widths`, `bin_heights`, and `knot_slopes` must broadcast on '
          'batch axes: {}'.format(str(e)))
    assertions = []
    if (tensorshape_util.is_fully_defined(bin_sizes_shape[-1:]) and
        tensorshape_util.is_fully_defined(self.knot_slopes.shape[-1:])):
      # Knot-count compatibility is fully known statically: check it now.
      if tensorshape_util.rank(self.knot_slopes.shape) > 0:
        num_interior_knots = tensorshape_util.dims(bin_sizes_shape)[-1] - 1
        if tensorshape_util.dims(
            self.knot_slopes.shape)[-1] not in (1, num_interior_knots):
          raise ValueError(
              'Innermost axis of non-scalar `knot_slopes` must broadcast with '
              '{}; got {}.'.format(num_interior_knots, self.knot_slopes.shape))
    elif self.validate_args:
      # Shapes not fully known: fall back to a runtime assertion.
      if is_init != any(
          tensor_util.is_ref(t)
          for t in (self.bin_widths, self.bin_heights, self.knot_slopes)):
        bw = tf.convert_to_tensor(self.bin_widths) if bw is None else bw
        bh = tf.convert_to_tensor(self.bin_heights) if bh is None else bh
        kd = _ensure_at_least_1d(self.knot_slopes) if kd is None else kd
        shape = tf.broadcast_dynamic_shape(
            tf.shape((bw + bh)[..., :-1]), tf.shape(kd))
        assertions.append(
            assert_util.assert_greater(
                tf.shape(shape)[0],
                tf.zeros([], dtype=shape.dtype),
                message='`(bin_widths + bin_heights)[..., :-1]` must broadcast '
                'with `knot_slopes` to at least 1-D.'))
    if not self.validate_args:
      assert not assertions
      return assertions
    # Value checks (positivity, matching sums) are deferred to each
    # parameter's read, and skipped on the opposite phase for refs.
    if (is_init != tensor_util.is_ref(self.bin_widths) or
        is_init != tensor_util.is_ref(self.bin_heights)):
      bw = tf.convert_to_tensor(self.bin_widths) if bw is None else bw
      bh = tf.convert_to_tensor(self.bin_heights) if bh is None else bh
      assertions += [
          assert_util.assert_near(
              tf.reduce_sum(bw, axis=-1),
              tf.reduce_sum(bh, axis=-1),
              message='`sum(bin_widths, axis=-1)` must equal '
              '`sum(bin_heights, axis=-1)`.'),
      ]
    if is_init != tensor_util.is_ref(self.bin_widths):
      bw = tf.convert_to_tensor(self.bin_widths) if bw is None else bw
      assertions += [
          assert_util.assert_positive(
              bw, message='`bin_widths` must be positive.'),
      ]
    if is_init != tensor_util.is_ref(self.bin_heights):
      bh = tf.convert_to_tensor(self.bin_heights) if bh is None else bh
      assertions += [
          assert_util.assert_positive(
              bh, message='`bin_heights` must be positive.'),
      ]
    if is_init != tensor_util.is_ref(self.knot_slopes):
      kd = _ensure_at_least_1d(self.knot_slopes) if kd is None else kd
      assertions += [
          assert_util.assert_positive(
              kd, message='`knot_slopes` must be positive.'),
      ]
    return assertions
|
|
# -*- coding: utf-8 -*-
'''
Wrapper around Server Density API
=================================
.. versionadded:: 2014.7.0
'''
# Import Python libs
from __future__ import absolute_import
import json
import logging
import os
import tempfile
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import map # pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.exceptions import CommandExecutionError
# requests is an optional dependency: record its availability so
# __virtual__() can disable this module gracefully when it is missing.
try:
    import requests
    ENABLED = True
except ImportError:
    ENABLED = False
# Module-level logger for this execution module.
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Return virtual name of the module, or False when the optional
    `requests` dependency is not importable.
    :return: The virtual name of the module.
    '''
    return "serverdensity_device" if ENABLED else False
def get_sd_auth(val, sd_auth_pillar_name='serverdensity'):
    '''
    Returns requested Server Density authentication value from pillar.
    CLI Example:
    .. code-block:: bash
        salt '*' serverdensity_device.get_sd_auth <val>
    '''
    sd_pillar = __pillar__.get(sd_auth_pillar_name)
    log.debug('Server Density Pillar: {0}'.format(sd_pillar))
    if not sd_pillar:
        log.error('Could not load {0} pillar'.format(sd_auth_pillar_name))
        raise CommandExecutionError(
            '{0} pillar is required for authentication'.format(sd_auth_pillar_name)
        )
    if val not in sd_pillar:
        log.error('Could not find value {0} in pillar'.format(val))
        raise CommandExecutionError('{0} value was not found in pillar'.format(val))
    return sd_pillar[val]
def _clean_salt_variables(params, variable_prefix="__"):
'''
Pops out variables from params which starts with `variable_prefix`.
'''
list(list(map(params.pop, [k for k in params if k.startswith(variable_prefix)])))
return params
def create(name, **params):
    '''
    Function to create device in Server Density. For more info, see the `API
    docs`__.
    .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
    CLI Example:
    .. code-block:: bash
        salt '*' serverdensity_device.create lama
        salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
    '''
    log.debug('Server Density params: {0}'.format(params))
    params = _clean_salt_variables(params)
    params['name'] = name
    api_response = requests.post(
        'https://api.serverdensity.io/inventory/devices/',
        params={'token': get_sd_auth('api_token')},
        data=params
    )
    log.debug('Server Density API Response: {0}'.format(api_response))
    log.debug('Server Density API Response content: {0}'.format(api_response.content))
    # Anything other than HTTP 200 is treated as "no device created".
    if api_response.status_code != 200:
        return None
    try:
        return json.loads(api_response.content)
    except ValueError:
        log.error('Could not parse API Response content: {0}'.format(api_response.content))
        raise CommandExecutionError(
            'Failed to create, API Response: {0}'.format(api_response)
        )
def delete(device_id):
    '''
    Delete a device from Server Density. For more information, see the `API
    docs`__.
    .. __: https://apidocs.serverdensity.com/Inventory/Devices/Deleting
    CLI Example:
    .. code-block:: bash
        salt '*' serverdensity_device.delete 51f7eafcdba4bb235e000ae4
    '''
    api_response = requests.delete(
        'https://api.serverdensity.io/inventory/devices/' + device_id,
        params={'token': get_sd_auth('api_token')}
    )
    log.debug('Server Density API Response: {0}'.format(api_response))
    log.debug('Server Density API Response content: {0}'.format(api_response.content))
    if api_response.status_code == 200:
        try:
            return json.loads(api_response.content)
        except ValueError:
            log.error('Could not parse API Response content: {0}'.format(api_response.content))
            # Fixed copy-pasted message ('Failed to create') — this is the
            # delete operation.
            raise CommandExecutionError(
                'Failed to delete, API Response: {0}'.format(api_response)
            )
    else:
        # Non-200 means the delete did not happen; signal with None.
        return None
def ls(**params):
    '''
    List devices in Server Density
    Results will be filtered by any params passed to this function. For more
    information, see the API docs on listing_ and searching_.
    .. _listing: https://apidocs.serverdensity.com/Inventory/Devices/Listing
    .. _searching: https://apidocs.serverdensity.com/Inventory/Devices/Searching
    CLI Example:
    .. code-block:: bash
        salt '*' serverdensity_device.ls
        salt '*' serverdensity_device.ls name=lama
        salt '*' serverdensity_device.ls name=lama group=lama_band installedRAM=32768
    '''
    params = _clean_salt_variables(params)
    endpoint = 'devices'
    # Change endpoint if there are params to filter by:
    if params:
        endpoint = 'resources'
        # Convert all ints to strings:
        for key, val in six.iteritems(params):
            params[key] = str(val)
    api_response = requests.get(
        'https://api.serverdensity.io/inventory/{0}'.format(endpoint),
        params={'token': get_sd_auth('api_token'), 'filter': json.dumps(params)}
    )
    log.debug('Server Density API Response: {0}'.format(api_response))
    log.debug('Server Density API Response content: {0}'.format(api_response.content))
    if api_response.status_code == 200:
        try:
            return json.loads(api_response.content)
        except ValueError:
            log.error(
                'Could not parse Server Density API Response content: {0}'
                .format(api_response.content)
            )
            # Fixed copy-pasted message ('Failed to create') — this is the
            # list operation.
            raise CommandExecutionError(
                'Failed to list devices, Server Density API Response: {0}'
                .format(api_response)
            )
    else:
        return None
def update(device_id, **params):
    '''
    Updates device information in Server Density. For more information see the
    `API docs`__.
    .. __: https://apidocs.serverdensity.com/Inventory/Devices/Updating
    CLI Example:
    .. code-block:: bash
        salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=lama group=lama_band
        salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=better_lama group=rock_lamas swapSpace=512
    '''
    params = _clean_salt_variables(params)
    api_response = requests.put(
        'https://api.serverdensity.io/inventory/devices/' + device_id,
        params={'token': get_sd_auth('api_token')},
        data=params
    )
    log.debug('Server Density API Response: {0}'.format(api_response))
    log.debug('Server Density API Response content: {0}'.format(api_response.content))
    if api_response.status_code == 200:
        try:
            return json.loads(api_response.content)
        except ValueError:
            log.error(
                'Could not parse Server Density API Response content: {0}'
                .format(api_response.content)
            )
            # Fixed copy-pasted message ('Failed to create') — this is the
            # update operation.
            raise CommandExecutionError(
                'Failed to update, API Response: {0}'.format(api_response)
            )
    else:
        return None
def install_agent(agent_key):
    '''
    Function downloads Server Density installation agent, and installs sd-agent
    with agent_key.
    CLI Example:
    .. code-block:: bash
        salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498
    '''
    work_dir = os.path.join(__opts__['cachedir'], 'tmp')
    if not os.path.isdir(work_dir):
        os.mkdir(work_dir)
    # Reserve a unique script name in the cache dir; only the path is
    # needed, so close the handle and let curl write the contents.
    install_file = tempfile.NamedTemporaryFile(dir=work_dir,
                                               suffix='.sh',
                                               delete=False)
    install_filename = install_file.name
    install_file.close()
    account_url = get_sd_auth('account_url')
    __salt__['cmd.run'](
        cmd='curl https://www.serverdensity.com/downloads/agent-install.sh -o {0}'.format(install_filename),
        cwd=work_dir
    )
    __salt__['cmd.run'](cmd='chmod +x {0}'.format(install_filename), cwd=work_dir)
    # Bug fix: the command template had lost its {filename} placeholder, so
    # the filename= keyword passed to .format() was never substituted and a
    # broken literal path was executed.
    return __salt__['cmd.run'](
        cmd='./{filename} -a {account_url} -k {agent_key}'.format(
            filename=install_filename, account_url=account_url, agent_key=agent_key),
        cwd=work_dir
    )
|
|
import os
import sys
import re
import types
import itertools
import numpy
import CGAT.Stats as Stats
import CGAT.IndexedGenome as IndexedGenome
from CGATReport.Tracker import *
from PeakcallingReport import *
##########################################################################
##########################################################################
##########################################################################
##
##########################################################################
class OverlapsBase(DefaultTracker):
    """Overlap between sets.
    This tracker returns the overlap between a track and
    all other tracks. Only one attribute is returned
    given by :attr:`mColumn`. As the table is not symmetrized,
    the mColumn attribute should be specified without the
    suffix, i.e. ('nbases_unique' instead of 'nbases_unique1').
    Possible values for mColumn are combinations of 'A_B'.
    A is one of ('nbases', 'nexons', 'ngenes', 'pbases', 'pexons', 'pgenes')
    where the prefix ''n'' or ''p'' denote the counts or the percent, respectively.
    B is one of ('total', 'uniq', 'ovl')
    """
    # NOTE(review): the docstring refers to :attr:`mColumn`, but the actual
    # attribute is named ``column`` below — confirm which name is canonical.
    # Database table to query; supplied by a mixin class (e.g. Overlaps).
    tablename = None
    # Column prefix to select (e.g. 'nbases_ovl'); set by derived classes.
    column = None
    pattern = "(.*)_intervals$"
    def getSlices(self, subset=None):
        # Slices are the user-supplied subset, verbatim; none by default.
        if subset is not None:
            return subset
        else:
            return []
    def __call__(self, track, slice=None):
        # The SQL templates below are filled via "%(name)s" % locals(), so
        # tablename and column must be bound as *locals* first.
        tablename = self.tablename
        if self.column is None:
            raise NotImplementedError("column not set in derived class.")
        column = self.column
        if slice is None:
            # No slice: overlap of this track with every other set. The table
            # is not symmetrized, so both directions are queried and merged.
            result = odict(self.get("SELECT set2, %(column)s1 FROM %(tablename)s WHERE set1 = '%(track)s'" % locals()) +
                           self.get("SELECT set1, %(column)s2 FROM %(tablename)s WHERE set2 = '%(track)s'" % locals()))
        elif "-" in slice:
            # A '-'-separated slice restricts the comparison to those sets.
            slices = "','".join(slice.split("-"))
            result = odict(self.get("SELECT set2, %(column)s1 FROM %(tablename)s WHERE set1 = '%(track)s' AND set2 IN ('%(slices)s')" % locals()) +
                           self.get("SELECT set1, %(column)s2 FROM %(tablename)s WHERE set2 = '%(track)s' AND set1 IN ('%(slices)s')" % locals()))
        else:
            # A single slice restricts the comparison to exactly one set.
            result = odict(self.get("SELECT set2, %(column)s1 FROM %(tablename)s WHERE set1 = '%(track)s' AND set2 = '%(slice)s'" % locals()) +
                           self.get("SELECT set1, %(column)s2 FROM %(tablename)s WHERE set2 = '%(track)s' AND set1 = '%(slice)s'" % locals()))
        return result
class ExonsCounts(OverlapsBase):
    """Overlap reported as the number of overlapping exons."""
    column = "nexons_ovl"
class ExonsPercent(OverlapsBase):
    """Overlap reported as the percentage of overlapping exons."""
    column = "pexons_ovl"
    def __call__(self, track, slice=None):
        # add the diagonal element of 100% (a track fully overlaps itself)
        x = OverlapsBase.__call__(self, track, slice)
        x[track] = 100.0
        return x
class BasesCounts(OverlapsBase):
    """Overlap reported as the number of overlapping bases."""
    column = "nbases_ovl"
class BasesPercent(OverlapsBase):
    """Overlap reported as the percentage of overlapping bases."""
    column = "pbases_ovl"
    def __call__(self, track, slice=None):
        # add the diagonal element of 100% (a track fully overlaps itself)
        x = OverlapsBase.__call__(self, track, slice)
        x[track] = 100.0
        return x
class BasesNormalized(OverlapsBase):
    """Base overlap normalized by the union of both sets (Jaccard-style %)."""
    column = "nbases_ovl"
    def __call__(self, track, slice=None):
        # Overrides the base query: reports 100 * overlap / (overlap + both
        # unique parts) rather than the raw column, plus the 100% diagonal.
        tablename = self.tablename
        if self.column is None:
            raise NotImplementedError("column not set in derived class.")
        column = self.column
        return odict(self.get("SELECT set2, 100.0 * nbases_ovl1 / (nbases_ovl1 + nbases_unique1 + nbases_unique2) FROM %(tablename)s WHERE set1 = '%(track)s'" % locals()) +
                     self.get("SELECT set1, 100.0 * nbases_ovl2 / (nbases_ovl2 + nbases_unique1 + nbases_unique2) FROM %(tablename)s WHERE set2 = '%(track)s'" % locals()) +
                     [(track, 100.0)])
# =================================================================
# mixin classes for table
# =================================================================
class Overlaps(object):
    """Mixin selecting the 'overlap' table."""
    tablename = "overlap"
class OverlapsUCSC(object):
    """Mixin selecting the 'ucsc_overlap' table."""
    tablename = "ucsc_overlap"
# =================================================================
# specific implementations of Annotator results
# =================================================================
# Analysis trackers (what quantity to report) ...
_overlap_analysis = {
    "ExonsCounts": ExonsCounts,
    "ExonsPercent": ExonsPercent,
    "BasesCounts": BasesCounts,
    "BasesPercent": BasesPercent,
    "BasesNormalized": BasesNormalized,
}
# ... crossed with table mixins (which table to read from).
_overlap_tables = {
    "Overlaps": Overlaps,
    "UCSCOverlaps": OverlapsUCSC,
}
# Generate one concrete tracker class per (table, analysis) combination and
# publish it in the module namespace, e.g. 'OverlapsExonsCounts'.
# The order of the base classes is important (table mixin first), and all of
# these must be new-style classes.
for _analysis_name, _analysis_cls in list(_overlap_analysis.items()):
    for _table_name, _table_cls in list(_overlap_tables.items()):
        _generated_name = "%s%s" % (_table_name, _analysis_name)
        globals()[_generated_name] = type(
            _generated_name, (_table_cls, _analysis_cls), {})
##########################################################################
##########################################################################
##########################################################################
##
##########################################################################
class OverlapVersusPeakval(DefaultTracker):
    """Overlap between experiments.

    Returns, per track, the paired columns (recall, reproducibility) taken
    from pexons_union / pexons_ovl of the reproducibility table.
    """
    tablename = "reproducibility"
    pattern = "(.*)_reproducibility$"

    def __call__(self, track, slice=None):
        # BUG FIX: 'tablename' must be a *local* variable for the
        # "%(tablename)s" % locals() interpolation below; relying on the class
        # attribute alone raised KeyError('tablename') at query time.
        tablename = self.tablename
        data = self.get(
            "SELECT pexons_union, pexons_ovl FROM %(track)s_%(tablename)s" % locals())
        # Transpose the row pairs into two labelled columns.
        return odict(list(zip(("recall", "reproducibility"), list(zip(*data)))))
class OverlapROC(DefaultTracker):
    """Overlap between experiments.
    This tracker computes ROC curves examining various
    interval variables to see if they improve reproducibility.
    True positives are those intervals which reproducible, i.e.,
    appear in all biological replicates.
    """
    pattern = "(.*)_reproducibility$"
    # interval columns evaluated as predictors of reproducibility
    mFields = ("peakval", "avgval", "length")
    mXLabel = "FPR"
    mYLabel = "TPR"
    def __call__(self, track, slice=None):
        result = odict()
        merged = None
        rocs = []
        for field in self.mFields:
            # collect (contig, start, end, value) for every replicate
            data = []
            for replicate in EXPERIMENTS.getTracks(track):
                statement = "SELECT contig, start, end,%(field)s FROM %(replicate)s_intervals" % locals(
                )
                data.append(self.get(statement))
            # build one interval index per replicate for fast overlap queries
            idx = []
            for x in range(len(data)):
                i = IndexedGenome.IndexedGenome()
                for contig, start, end, peakval in data[x]:
                    i.add(contig, start, end, peakval)
                idx.append(i)
            def _iter(all):
                # merge the sorted intervals into maximal non-overlapping spans
                all.sort()
                last_contig, first_start, last_end, last_value = all[0]
                for contig, start, end, value in all[1:]:
                    if contig != last_contig or last_end < start:
                        yield (last_contig, first_start, last_end)
                        last_contig, first_start, last_end = contig, start, end
                    else:
                        last_end = max(last_end, end)
                yield (last_contig, first_start, last_end)
            if not merged:
                # the merged union is field-independent, so compute it once
                # on the first loop iteration only
                all = [x for x in itertools.chain(*data)]
                merged = list(_iter(all))
            roc_data = []
            for contig, start, end in merged:
                intervals = []
                for i in idx:
                    try:
                        intervals.append(list(i.get(contig, start, end)))
                    except KeyError:
                        continue
                if len(intervals) == 0:
                    continue
                # reproducible = present in every replicate
                is_repro = len([x for x in intervals if x != []]) == len(data)
                # presumably i.get() yields (start, end, value) — x[2] is value
                value = max([x[2] for x in itertools.chain(*intervals)])
                # fpr, tpr
                roc_data.append((value, is_repro))
            # sort by predictor value, descending, before computing the curve
            roc_data.sort()
            roc_data.reverse()
            roc = list(zip(*Stats.computeROC(roc_data)))
            result[field] = odict((("FPR", roc[0]), (field, roc[1])))
        return result
class OverlapMatrix(DefaultTracker):
    """Square matrix of pairwise overlap values between sets.

    Reads both directed overlap columns (field1, field2) from the
    reproducibility table and fills a symmetric-by-construction matrix.
    """
    pattern = "(.*)_reproducibility$"
    field = "pexons_ovl"

    def __call__(self, track):
        # BUG FIX: the SQL template was passed to self.get() *without*
        # "% locals()", so the literal text '%(field)s1' reached the database.
        # 'field' must also be bound locally for the interpolation to work.
        field = self.field
        data = self.get(
            "SELECT set1, set2, %(field)s1, %(field)s2 FROM %(track)s_reproducibility" % locals())
        # All set names appearing on either side, in sorted order.
        rows = sorted(
            list(set([x[0] for x in data]).union(set([x[1] for x in data]))))
        map_row2index = dict([(x[1], x[0]) for x in enumerate(rows)])
        matrix = numpy.zeros((len(rows), len(rows)))
        for row, col, value1, value2 in data:
            # value1 is the overlap seen from 'row', value2 from 'col'.
            matrix[map_row2index[row]][map_row2index[col]] = value1
            matrix[map_row2index[col]][map_row2index[row]] = value2
        return odict((('matrix', matrix),
                      ('rows', rows),
                      ('columns', rows)))
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quaternion math.
This module assumes the xyzw quaternion format where xyz is the imaginary part
and w is the real part.
Functions in this module support both batched and unbatched quaternions.
"""
from jax import numpy as jnp
from jax.numpy import linalg
def safe_acos(t, eps=1e-8):
  """Numerically safe arccos.

  Clips the argument into [-1 + eps, 1 - eps] before calling arccos, so the
  function (whose derivative diverges at +/-1) is never evaluated exactly at
  the endpoints.
  """
  clipped = jnp.clip(t, -1.0 + eps, 1.0 - eps)
  return jnp.arccos(clipped)
def im(q):
  """Return the imaginary (x, y, z) components of quaternion(s) ``q``."""
  return q[..., 0:3]
def re(q):
  """Fetch the real part of the quaternion."""
  # NOTE: this function shadows the stdlib ``re`` module within this file.
  # The trailing slice keeps the last axis (shape (..., 1)).
  return q[..., 3:]
def identity():
  """Return the identity quaternion [0, 0, 0, 1] in xyzw format."""
  return jnp.array([0.0, 0.0, 0.0, 1.0])
def conjugate(q):
  """Return the quaternion conjugate: the imaginary part negated."""
  negated_imag = -im(q)
  return jnp.concatenate([negated_imag, re(q)], axis=-1)
def inverse(q):
  """Compute the inverse of a quaternion."""
  # NOTE(review): this computes conjugate(q) / |q|, which equals the true
  # inverse conjugate(q) / |q|**2 only when |q| == 1 — confirm callers always
  # pass unit quaternions.
  return normalize(conjugate(q))
def normalize(q):
  """Normalize a quaternion."""
  # Divides by the last-axis Euclidean norm (kept as a trailing size-1 axis).
  return q / norm(q)
def norm(q):
  """Return the Euclidean norm along the last axis, keeping that axis."""
  return linalg.norm(q, axis=-1, keepdims=True)
def multiply(q1, q2):
  """Multiply two quaternions (Hamilton product).

  Supports batched inputs of shape (..., 4), consistent with the module
  docstring.

  Args:
    q1: quaternion(s) in (x, y, z, w) format.
    q2: quaternion(s) in (x, y, z, w) format.

  Returns:
    The product q1 * q2 in (x, y, z, w) format.
  """
  c = (re(q1) * im(q2)
       + re(q2) * im(q1)
       + jnp.cross(im(q1), im(q2)))
  # BUG FIX: jnp.dot contracts matrix-style for >=2-D inputs, which breaks
  # batched (..., 3) imaginary parts. An elementwise product summed over the
  # last axis (keepdims to match re()'s trailing axis) is correct for both
  # batched and unbatched quaternions, and identical for the unbatched case.
  w = re(q1) * re(q2) - jnp.sum(im(q1) * im(q2), axis=-1, keepdims=True)
  return jnp.concatenate([c, w], axis=-1)
def rotate(q, v):
  """Rotate a vector using a quaternion."""
  # Create the quaternion representation of the vector (real part zero),
  # then apply the sandwich product q * v * conj(q) and keep the imaginary
  # part, which holds the rotated vector.
  q_v = jnp.concatenate([v, jnp.zeros_like(v[..., :1])], axis=-1)
  return im(multiply(multiply(q, q_v), conjugate(q)))
def log(q, eps=1e-8):
  """Computes the quaternion logarithm.

  For q = s + v (real part s, imaginary part v):
  log(q) = (v / |v|) * arccos(s / |q|) for the imaginary part and log(|q|)
  for the real part.

  References:
    https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions

  Args:
    q: the quaternion in (x,y,z,w) format.
    eps: an epsilon value for numerical stability.

  Returns:
    The logarithm of q.
  """
  mag = linalg.norm(q, axis=-1, keepdims=True)
  v = im(q)
  s = re(q)
  w = jnp.log(mag)
  denom = jnp.maximum(
      linalg.norm(v, axis=-1, keepdims=True), eps * jnp.ones_like(v))
  # BUG FIX: the rotation angle is arccos(s / |q|). The previous code used
  # safe_acos(s / eps), which scales s by 1e8 and saturates the clip for
  # every input, producing a (nearly) constant angle regardless of q.
  xyz = v / denom * safe_acos(s / mag)
  return jnp.concatenate((xyz, w), axis=-1)
def exp(q, eps=1e-8):
  """Computes the quaternion exponential.

  References:
    https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions

  Args:
    q: the quaternion in (x,y,z,w) format or (x,y,z) if is_pure is True.
    eps: an epsilon value for numerical stability.

  Returns:
    The exponential of q.
  """
  if q.shape[-1] == 3:
    # Pure quaternion: the real part is implicitly zero.
    v = q
    s = jnp.zeros_like(q[..., -1:])
  else:
    v = im(q)
    s = re(q)
  norm_v = linalg.norm(v, axis=-1, keepdims=True)
  # Guard the division for vanishing imaginary parts.
  safe_norm = jnp.maximum(norm_v, eps * jnp.ones_like(norm_v))
  xyz = jnp.sin(norm_v) * v / safe_norm
  w = jnp.cos(norm_v)
  return jnp.exp(s) * jnp.concatenate((xyz, w), axis=-1)
def to_rotation_matrix(q):
  """Constructs a rotation matrix from a quaternion.
  Args:
    q: a (*,4) array containing quaternions.
  Returns:
    A (*,3,3) array containing rotation matrices.
  """
  # NOTE(review): jnp.split keeps the last axis, so x/y/z/w have shape
  # (*, 1) while s (computed without keepdims) has shape (*). The stacks
  # below use axis=0; for an unbatched (4,) input this yields shape
  # (3, 3, 1) rather than the documented (3, 3), and for batched input the
  # (*, 1) vs (*) broadcasting looks suspect — verify against callers
  # before relying on the documented shape.
  x, y, z, w = jnp.split(q, 4, axis=-1)
  # s = 1 / |q|^2 makes the formula valid for non-unit quaternions.
  s = 1.0 / jnp.sum(q ** 2, axis=-1)
  return jnp.stack([
      jnp.stack([1 - 2 * s * (y ** 2 + z ** 2),
                 2 * s * (x * y - z * w),
                 2 * s * (x * z + y * w)], axis=0),
      jnp.stack([2 * s * (x * y + z * w),
                 1 - s * 2 * (x ** 2 + z ** 2),
                 2 * s * (y * z - x * w)], axis=0),
      jnp.stack([2 * s * (x * z - y * w),
                 2 * s * (y * z + x * w),
                 1 - 2 * s * (x ** 2 + y ** 2)], axis=0),
  ], axis=0)
def from_rotation_matrix(m, eps=1e-9):
  """Construct quaternion from a rotation matrix.
  Args:
    m: a (*,3,3) array containing rotation matrices.
    eps: a small number for numerical stability.
  Returns:
    A (*,4) array containing quaternions.
  """
  # Branch on the largest diagonal term to keep the divisor 'sq' well away
  # from zero (Shepperd-style case analysis).
  # NOTE(review): jnp.trace sums over the *first two* axes by default; for a
  # batched (*,3,3) input that is not the per-matrix trace — confirm batched
  # use or that callers only pass a single matrix.
  trace = jnp.trace(m)
  m00 = m[..., 0, 0]
  m01 = m[..., 0, 1]
  m02 = m[..., 0, 2]
  m10 = m[..., 1, 0]
  m11 = m[..., 1, 1]
  m12 = m[..., 1, 2]
  m20 = m[..., 2, 0]
  m21 = m[..., 2, 1]
  m22 = m[..., 2, 2]
  def tr_positive():
    sq = jnp.sqrt(trace + 1.0) * 2.  # sq = 4 * w.
    w = 0.25 * sq
    x = jnp.divide(m21 - m12, sq)
    y = jnp.divide(m02 - m20, sq)
    z = jnp.divide(m10 - m01, sq)
    return jnp.stack((x, y, z, w), axis=-1)
  def cond_1():
    sq = jnp.sqrt(1.0 + m00 - m11 - m22 + eps) * 2.  # sq = 4 * x.
    w = jnp.divide(m21 - m12, sq)
    x = 0.25 * sq
    y = jnp.divide(m01 + m10, sq)
    z = jnp.divide(m02 + m20, sq)
    return jnp.stack((x, y, z, w), axis=-1)
  def cond_2():
    sq = jnp.sqrt(1.0 + m11 - m00 - m22 + eps) * 2.  # sq = 4 * y.
    w = jnp.divide(m02 - m20, sq)
    x = jnp.divide(m01 + m10, sq)
    y = 0.25 * sq
    z = jnp.divide(m12 + m21, sq)
    return jnp.stack((x, y, z, w), axis=-1)
  def cond_3():
    sq = jnp.sqrt(1.0 + m22 - m00 - m11 + eps) * 2.  # sq = 4 * z.
    w = jnp.divide(m10 - m01, sq)
    x = jnp.divide(m02 + m20, sq)
    y = jnp.divide(m12 + m21, sq)
    z = 0.25 * sq
    return jnp.stack((x, y, z, w), axis=-1)
  def cond_idx(cond):
    # Broadcast a scalar/batched condition over the 4 quaternion components.
    cond = jnp.expand_dims(cond, -1)
    cond = jnp.tile(cond, [1] * (len(m.shape) - 2) + [4])
    return cond
  # All branches are evaluated; jnp.where selects per-element (XLA-friendly).
  where_2 = jnp.where(cond_idx(m11 > m22), cond_2(), cond_3())
  where_1 = jnp.where(cond_idx((m00 > m11) & (m00 > m22)), cond_1(), where_2)
  return jnp.where(cond_idx(trace > 0), tr_positive(), where_1)
|
|
#!/usr/bin/env python
# coding: utf-8
from datetime import datetime
from distutils import spawn
import argparse
import json
import os
import platform
import shutil
import socket
import sys
import urllib
import urllib2
__version__ = '6.0.2'
###############################################################################
# Options
###############################################################################
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'-d', '--dependencies', dest='install_dependencies', action='store_true',
help='install virtualenv and python dependencies',
)
PARSER.add_argument(
'-s', '--start', dest='start', action='store_true',
help='starts the dev_appserver.py with storage_path pointing to temp',
)
PARSER.add_argument(
'-o', '--host', dest='host', action='store', default='127.0.0.1',
help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
'-p', '--port', dest='port', action='store', default='8080',
help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
'--appserver-args', dest='args', nargs=argparse.REMAINDER, default=[],
help='all following args are passed to dev_appserver.py',
)
PARSER.add_argument(
'-v', '--version', dest='show_version', action='store_true',
help='Show gae-init version',
)
ARGS = PARSER.parse_args()
###############################################################################
# Globals
###############################################################################
BAD_ENDINGS = ['pyc', 'pyo', '~']
GAE_PATH = ''
IS_WINDOWS = platform.system() == 'Windows'
###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_TEMP = 'temp'
DIR_VENV = os.path.join(DIR_TEMP, 'venv')
DIR_LIB = os.path.join(DIR_MAIN, 'lib')
DIR_LIBX = os.path.join(DIR_MAIN, 'libx')
FILE_LIB = '%s.zip' % DIR_LIB
FILE_REQUIREMENTS = 'requirements.txt'
FILE_PIP_GUARD = os.path.join(DIR_TEMP, 'pip.guard')
FILE_VENV = os.path.join(DIR_VENV, 'Scripts', 'activate.bat') \
if IS_WINDOWS \
else os.path.join(DIR_VENV, 'bin', 'activate')
DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
FILE_UPDATE = os.path.join(DIR_TEMP, 'update.json')
###############################################################################
# Other global variables
###############################################################################
CORE_VERSION_URL = 'https://gae-init.appspot.com/_s/version/'
INTERNET_TEST_URL = 'https://www.google.com'
REQUIREMENTS_URL = 'http://docs.gae-init.appspot.com/requirement/'
TRAVIS = 'TRAVIS' in os.environ
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
    """Print a timestamped, column-aligned status line."""
    timestamp = datetime.now().strftime('%H:%M:%S')
    if not filename:
        # No filename: render a dashed separator and right-pad the label.
        filename = '-' * 46
        script = script.rjust(12, '-')
    print('[%s] %12s %s' % (timestamp, script, filename))
def make_dirs(directory):
    """Create *directory* (including parents) unless the path already exists."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
def listdir(directory, split_ext=False):
    """List the entries of *directory*, optionally without file extensions.

    Returns an empty list when the directory cannot be read.
    """
    try:
        entries = os.listdir(directory)
        if split_ext:
            return [os.path.splitext(entry)[0] for entry in entries]
        return entries
    except OSError:
        return []
def site_packages_path():
    """Return the virtualenv's site-packages directory for this platform."""
    if IS_WINDOWS:
        return os.path.join(DIR_VENV, 'Lib', 'site-packages')
    version_dir = 'python%s.%s' % sys.version_info[:2]
    return os.path.join(DIR_VENV, 'lib', version_dir, 'site-packages')
def create_virtualenv():
    """Create the project virtualenv (once) and wire it up for GAE development.

    Appends a PYTHONPATH reset to the activate script and writes a 'gae.pth'
    into site-packages so the GAE SDK and libx directory are importable.
    """
    if not os.path.exists(FILE_VENV):
        os.system('virtualenv --no-site-packages %s' % DIR_VENV)
        # Clear any inherited PYTHONPATH whenever the venv is activated.
        os.system('echo %s >> %s' % (
            'set PYTHONPATH=' if IS_WINDOWS else 'unset PYTHONPATH', FILE_VENV
        ))
        pth_file = os.path.join(site_packages_path(), 'gae.pth')
        echo_to = 'echo %s >> {pth}'.format(pth=pth_file)
        os.system(echo_to % find_gae_path())
        os.system(echo_to % os.path.abspath(DIR_LIBX))
    return True
def exec_pip_commands(command):
    """Run *command* inside the activated virtualenv; return the exit status."""
    script = []
    if create_virtualenv():
        # Activate the venv first so pip installs into it.
        activate_cmd = 'call %s' if IS_WINDOWS else 'source %s'
        activate_cmd %= FILE_VENV
        script.append(activate_cmd)
    script.append('echo %s' % command)
    script.append('%s SKIP_GOOGLEAPICLIENT_COMPAT_CHECK=1' %
                  ('set' if IS_WINDOWS else 'export'))
    script.append(command)
    # Windows chains with '&'; elsewhere run the whole sequence in one bash -c.
    script = '&'.join(script) if IS_WINDOWS else \
        '/bin/bash -c "%s"' % ';'.join(script)
    return os.system(script)
def make_guard(fname, cmd, spec):
    """Write a guard file that records *cmd* ran against *spec*."""
    message = 'Prevents %s execution if newer than %s' % (cmd, spec)
    with open(fname, 'w') as guard:
        guard.write(message)
def guard_is_newer(guard, watched):
    """Return True when *guard* exists and is more recent than *watched*."""
    if not os.path.exists(guard):
        return False
    return os.path.getmtime(guard) > os.path.getmtime(watched)
def check_if_pip_should_run():
    """Return True unless the pip guard file is newer than requirements.txt."""
    return not guard_is_newer(FILE_PIP_GUARD, FILE_REQUIREMENTS)
def install_py_libs():
    """pip-install requirements and copy the packages into main/lib.

    Skips entirely when the guard file is up to date and main/lib exists.
    Returns the pip exit status (0 on success or when skipped).
    """
    return_code = 0
    if not check_if_pip_should_run() and os.path.exists(DIR_LIB):
        return return_code
    make_guard_flag = True
    if TRAVIS:
        # Verbose output on CI to aid debugging.
        return_code = exec_pip_commands('pip install -v -r %s' % FILE_REQUIREMENTS)
    else:
        return_code = exec_pip_commands('pip install -q -r %s' % FILE_REQUIREMENTS)
    if return_code:
        print('ERROR running pip install')
        # Don't refresh the guard, so the next run retries the install.
        make_guard_flag = False
    # Packages/files that should not be copied into main/lib.
    exclude_ext = ['.pth', '.pyc', '.egg-info', '.dist-info', '.so']
    exclude_prefix = ['setuptools-', 'pip-', 'Pillow-']
    exclude = [
        'test', 'tests', 'pip', 'setuptools', '_markerlib', 'PIL',
        'easy_install.py', 'pkg_resources', 'pkg_resources.py'
    ]
    def _exclude_prefix(pkg):
        # True when pkg starts with any excluded distribution prefix.
        for prefix in exclude_prefix:
            if pkg.startswith(prefix):
                return True
        return False
    def _exclude_ext(pkg):
        # True when pkg ends with any excluded extension.
        for ext in exclude_ext:
            if pkg.endswith(ext):
                return True
        return False
    def _get_dest(pkg):
        # Destination path inside main/lib (created on demand).
        make_dirs(DIR_LIB)
        return os.path.join(DIR_LIB, pkg)
    site_packages = site_packages_path()
    dir_libs = listdir(DIR_LIB)
    dir_libs.extend(listdir(DIR_LIBX))
    for dir_ in listdir(site_packages):
        # Skip anything already copied, excluded, or filtered by prefix/ext.
        if dir_ in dir_libs or dir_ in exclude:
            continue
        if _exclude_prefix(dir_) or _exclude_ext(dir_):
            continue
        src_path = os.path.join(site_packages, dir_)
        copy = shutil.copy if os.path.isfile(src_path) else shutil.copytree
        copy(src_path, _get_dest(dir_))
    if make_guard_flag:
        make_guard(FILE_PIP_GUARD, 'pip', FILE_REQUIREMENTS)
    return return_code
def install_dependencies():
    """Ensure the temp directory exists and install python libraries."""
    make_dirs(DIR_TEMP)
    return install_py_libs()
def check_for_update():
    """Fetch the latest gae-init version info, at most once per day.

    The result is cached in FILE_UPDATE; network errors are ignored.
    """
    if os.path.exists(FILE_UPDATE):
        mtime = os.path.getmtime(FILE_UPDATE)
        last = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
        today = datetime.utcnow().strftime('%Y-%m-%d')
        if last == today:
            # Already checked today.
            return
    try:
        # Touch the file first so a failed request still throttles retries.
        with open(FILE_UPDATE, 'a'):
            os.utime(FILE_UPDATE, None)
        request = urllib2.Request(
            CORE_VERSION_URL,
            urllib.urlencode({'version': __version__}),
        )
        response = urllib2.urlopen(request)
        with open(FILE_UPDATE, 'w') as update_json:
            update_json.write(response.read())
    except (urllib2.HTTPError, urllib2.URLError):
        pass
def print_out_update(force_show=False):
    """Print an update notice if a newer gae-init version was recorded."""
    try:
        import pip
        # Older pip exposed a SemanticVersion here; newer releases don't,
        # which raises AttributeError and triggers the fallback below.
        SemVer = pip.util.version.SemanticVersion
    except AttributeError:
        import pip._vendor.distlib.version
        SemVer = pip._vendor.distlib.version.SemanticVersion
    try:
        with open(FILE_UPDATE, 'r') as update_json:
            data = json.load(update_json)
        if SemVer(__version__) < SemVer(data['version']) or force_show:
            print_out('UPDATE')
            print_out(data['version'], 'Latest version of gae-init')
            print_out(__version__, 'Your version is a bit behind')
            print_out('CHANGESET', data['changeset'])
    except (ValueError, KeyError):
        # Corrupt/partial cache file: discard it and retry next run.
        os.remove(FILE_UPDATE)
    except IOError:
        pass
###############################################################################
# Doctor
###############################################################################
def internet_on():
    """Return True when the internet test URL answers within 2 seconds."""
    try:
        urllib2.urlopen(INTERNET_TEST_URL, timeout=2)
        return True
    except (urllib2.URLError, socket.timeout):
        return False
def check_requirement(check_func):
    """Run one requirement checker, printing guidance when it fails.

    *check_func* returns (ok, human-readable name, docs anchor); the result
    of this function is the boolean outcome of the check.
    """
    result, name, help_url_id = check_func()
    if result:
        return True
    print_out('NOT FOUND', name)
    if help_url_id:
        print('Please see %s%s' % (REQUIREMENTS_URL, help_url_id))
    return False
def find_gae_path():
    """Locate the Google App Engine SDK directory; '' when not found.

    The result is cached in the module-level GAE_PATH global.
    """
    global GAE_PATH
    if GAE_PATH:
        return GAE_PATH
    if IS_WINDOWS:
        # spawn.find_executable is unreliable for .py files on Windows, so
        # scan PATH entries for dev_appserver.py directly.
        gae_path = None
        for path in os.environ['PATH'].split(os.pathsep):
            if os.path.isfile(os.path.join(path, 'dev_appserver.py')):
                gae_path = path
    else:
        gae_path = spawn.find_executable('dev_appserver.py')
        if gae_path:
            gae_path = os.path.dirname(os.path.realpath(gae_path))
    if not gae_path:
        return ''
    gcloud_exec = 'gcloud.cmd' if IS_WINDOWS else 'gcloud'
    if not os.path.isfile(os.path.join(gae_path, gcloud_exec)):
        # Standalone SDK layout: dev_appserver.py sits in the SDK root.
        GAE_PATH = gae_path
    else:
        # gcloud layout: the SDK lives under platform/google_appengine.
        gae_path = os.path.join(gae_path, '..', 'platform', 'google_appengine')
        if os.path.exists(gae_path):
            GAE_PATH = os.path.realpath(gae_path)
    return GAE_PATH
# Each checker returns (ok, human-readable name, docs anchor) for
# consumption by check_requirement().
def check_internet():
    return internet_on(), 'Internet', ''
def check_gae():
    return bool(find_gae_path()), 'Google App Engine SDK', '#gae'
def check_git():
    return bool(spawn.find_executable('git')), 'Git', '#git'
def check_nodejs():
    return bool(spawn.find_executable('node')), 'Node.js', '#nodejs'
def check_pip():
    return bool(spawn.find_executable('pip')), 'pip', '#pip'
def check_virtualenv():
    return bool(spawn.find_executable('virtualenv')), 'virtualenv', '#virtualenv'
def doctor_says_ok():
    """Run all environment checks; exit(1) when a mandatory tool is missing.

    Returns the (non-fatal) internet connectivity check result.
    """
    checkers = [check_gae, check_git, check_nodejs, check_pip, check_virtualenv]
    results = [check_requirement(check) for check in checkers]
    if not all(results):
        sys.exit(1)
    return check_requirement(check_internet)
###############################################################################
# Main
###############################################################################
def run_start():
    """Launch dev_appserver.py with local storage under temp/storage."""
    make_dirs(DIR_STORAGE)
    port = int(ARGS.port)
    run_command = ' '.join(map(str, [
        'dev_appserver.py',
        DIR_MAIN,
        '--host %s' % ARGS.host,
        '--port %s' % port,
        # Admin console is served on the next port up.
        '--admin_port %s' % (port + 1),
        '--storage_path=%s' % DIR_STORAGE,
        '--skip_sdk_update_check',
    ] + ARGS.args))
    os.system(run_command)
def run():
    """Entry point: validate the environment and dispatch on CLI flags."""
    return_code = 0
    # --appserver-args without --start makes no sense; show usage instead.
    if len(sys.argv) == 1 or (ARGS.args and not ARGS.start):
        PARSER.print_help()
        sys.exit(1)
    # Operate relative to this script's directory regardless of the CWD.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    if doctor_says_ok():
        return_code |= install_dependencies()
        check_for_update()
    if ARGS.show_version:
        print_out_update(force_show=True)
    else:
        print_out_update()
    if ARGS.start:
        run_start()
    if ARGS.install_dependencies:
        return_code |= install_dependencies()
    sys.exit(return_code)
if __name__ == '__main__':
    run()
|
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A simple IPC mechanism for communicating between two local processes. We
use marshal to serialize data - this means that both client and server must
run the same Python version, and that clients must be trusted (as
un-marshalling untrusted data can result in arbitrary code execution).
"""
from __future__ import annotations
import asyncio
import fcntl
import json
import marshal
import os.path
import socket
import struct
from typing import Any
from libqtile.log_utils import logger
from libqtile.utils import get_cache_dir
HDRFORMAT = "!L"
HDRLEN = struct.calcsize(HDRFORMAT)
SOCKBASE = "qtilesocket.%s"
class IPCError(Exception):
    """Raised on IPC failures: missing socket, bad data, or an unresponsive server."""
    pass
def find_sockfile(display: str | None = None):
    """
    Finds the appropriate socket file for the given display.
    If unspecified, the socket file is determined as follows:
    - If WAYLAND_DISPLAY is set, use it.
    - else if DISPLAY is set, use that.
    - else check for the existence of a socket file for WAYLAND_DISPLAY=wayland-0
      and if it exists, use it.
    - else check for the existence of a socket file for DISPLAY=:0
      and if it exists, use it.
    - else raise an IPCError.
    """
    cache_directory = get_cache_dir()
    # First truthy candidate wins: explicit argument, then the environment.
    for candidate in (
        display,
        os.environ.get("WAYLAND_DISPLAY"),
        os.environ.get("DISPLAY"),
    ):
        if candidate:
            return os.path.join(cache_directory, SOCKBASE % candidate)
    # No display known: fall back to conventional defaults, if present.
    for fallback in ("wayland-0", ":0"):
        sockfile = os.path.join(cache_directory, SOCKBASE % fallback)
        if os.path.exists(sockfile):
            return sockfile
    raise IPCError("Could not find socket file.")
class _IPC:
    """A helper class to handle properly packing and unpacking messages"""
    @staticmethod
    def unpack(data: bytes, *, is_json: bool | None = None) -> tuple[Any, bool]:
        """Unpack the incoming message
        Parameters
        ----------
        data: bytes
            The incoming message to unpack
        is_json: bool | None
            If the message should be unpacked as json. By default, try to
            unpack json and fallback gracefully to marshalled bytes.
        Returns
        -------
        tuple[Any, bool]
            A tuple of the unpacked object and a boolean denoting if the
            message was deserialized using json. If True, the return message
            should be packed as json.
        """
        if is_json is None or is_json:
            try:
                return json.loads(data.decode()), True
            except ValueError as e:
                # json.JSONDecodeError and UnicodeDecodeError are both
                # ValueError subclasses, so marshalled bytes land here and
                # fall through to the marshal branch below.
                if is_json:
                    raise IPCError("Unable to decode json data") from e
        try:
            # Marshal frames are length-prefixed (see pack()); validate the
            # header before trusting the payload length.
            assert len(data) >= HDRLEN
            size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]
            assert size >= len(data[HDRLEN:])
            return marshal.loads(data[HDRLEN : HDRLEN + size]), False
        except AssertionError as e:
            raise IPCError("error reading reply! (probably the socket was disconnected)") from e
    @staticmethod
    def pack(msg: Any, *, is_json: bool = False) -> bytes:
        """Pack the object into a message to pass"""
        if is_json:
            json_obj = json.dumps(msg, default=_IPC._json_encoder)
            return json_obj.encode()
        # Marshal frames carry a network-order length prefix.
        msg_bytes = marshal.dumps(msg)
        size = struct.pack(HDRFORMAT, len(msg_bytes))
        return size + msg_bytes
    @staticmethod
    def _json_encoder(field: Any) -> Any:
        """Convert non-serializable types to ones understood by stdlib json module"""
        if isinstance(field, set):
            return list(field)
        raise ValueError(f"Tried to JSON serialize unsupported type {type(field)}: {field}")
class Client:
    def __init__(self, socket_path: str, is_json=False) -> None:
        """Create a new IPC client
        Parameters
        ----------
        socket_path: str
            The file path to the file that is used to open the connection to
            the running IPC server.
        is_json: bool
            Pack and unpack messages as json
        """
        self.socket_path = socket_path
        self.is_json = is_json
    def call(self, data: Any) -> Any:
        # Convenience alias for send().
        return self.send(data)
    def send(self, msg: Any) -> Any:
        """Send the message and return the response from the server
        If any exception is raised by the server, that will propagate out of
        this call.
        """
        # Synchronous wrapper: spins up an event loop just for this exchange.
        return asyncio.run(self.async_send(msg))
    async def async_send(self, msg: Any) -> Any:
        """Send the message to the server
        Connect to the server, then pack and send the message to the server,
        then wait for and return the response from the server.
        """
        try:
            reader, writer = await asyncio.wait_for(
                asyncio.open_unix_connection(path=self.socket_path), timeout=3
            )
        except (ConnectionRefusedError, FileNotFoundError):
            raise IPCError("Could not open {}".format(self.socket_path))
        try:
            send_data = _IPC.pack(msg, is_json=self.is_json)
            writer.write(send_data)
            # EOF signals the server that the full request has been sent.
            writer.write_eof()
            read_data = await asyncio.wait_for(reader.read(), timeout=10)
        except asyncio.TimeoutError:
            raise IPCError("Server not responding")
        finally:
            # see the note in Server._server_callback()
            writer.close()
            await writer.wait_closed()
        data, _ = _IPC.unpack(read_data, is_json=self.is_json)
        return data
class Server:
    """IPC server listening on a unix socket; replies via *handler*."""
    def __init__(self, socket_path: str, handler) -> None:
        self.socket_path = socket_path
        self.handler = handler
        self.server = None  # type: asyncio.AbstractServer | None
        # Remove any stale socket left behind by a previous process.
        if os.path.exists(socket_path):
            os.unlink(socket_path)
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        # Mark the socket close-on-exec so spawned children don't inherit it.
        flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD)
        fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        self.sock.bind(self.socket_path)
    async def _server_callback(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ) -> None:
        """Callback when a connection is made to the server
        Read the data sent from the client, execute the requested command, and
        send the reply back to the client.
        """
        try:
            logger.debug("Connection made to server")
            # The client signals end-of-request with EOF (see Client).
            data = await reader.read()
            logger.debug("EOF received by server")
            req, is_json = _IPC.unpack(data)
        except IPCError:
            logger.warning("Invalid data received, closing connection")
        else:
            rep = self.handler(req)
            # Reply in the same serialization the request used.
            result = _IPC.pack(rep, is_json=is_json)
            logger.debug("Sending result on receive EOF")
            writer.write(result)
            logger.debug("Closing connection on receive EOF")
            writer.write_eof()
        finally:
            writer.close()
            await writer.wait_closed()
    async def __aenter__(self) -> "Server":
        """Start and return the server"""
        await self.start()
        return self
    async def __aexit__(self, _exc_type, _exc_value, _tb) -> None:
        """Close and shutdown the server"""
        await self.close()
    async def start(self) -> None:
        """Start the server"""
        assert self.server is None
        logger.debug("Starting server")
        server_coroutine = asyncio.start_unix_server(self._server_callback, sock=self.sock)
        self.server = await server_coroutine
    async def close(self) -> None:
        """Close and shutdown the server"""
        assert self.server is not None
        logger.debug("Stopping server on close")
        self.server.close()
        await self.server.wait_closed()
        self.server = None
|
|
from sqlalchemy import exc as sqla_exc
from sqlalchemy import text
from alembic.testing import exclusions
from alembic.testing.requirements import SuiteRequirements
from alembic.util import compat
from alembic.util import sqla_compat
class DefaultRequirements(SuiteRequirements):
    """Per-backend feature requirements/exclusions for the test suite.
    Each property returns an exclusions rule describing which database
    backends support (or are expected to fail) a given capability.
    """
    @property
    def unicode_string(self):
        return exclusions.skip_if(["oracle"])
    @property
    def alter_column(self):
        return exclusions.skip_if(["sqlite"], "no ALTER COLUMN support")
    @property
    def schemas(self):
        """Target database must support external schemas, and have one
        named 'test_schema'."""
        return exclusions.skip_if(["sqlite", "firebird"], "no schema support")
    @property
    def no_referential_integrity(self):
        """test will fail if referential integrity is enforced"""
        return exclusions.fails_on_everything_except("sqlite")
    @property
    def non_native_boolean(self):
        """test will fail if native boolean is provided"""
        return exclusions.fails_if(
            exclusions.LambdaPredicate(
                lambda config: config.db.dialect.supports_native_boolean
            )
        )
    @property
    def non_native_boolean_check_constraint(self):
        """backend creates a check constraint for booleans if enabled"""
        return exclusions.only_on(
            exclusions.LambdaPredicate(
                lambda config: not config.db.dialect.supports_native_boolean
                and config.db.dialect.non_native_boolean_check_constraint
            )
        )
    @property
    def check_constraints_w_enforcement(self):
        return exclusions.fails_on(["mysql", "mariadb"])
    @property
    def unnamed_constraints(self):
        """constraints without names are supported."""
        return exclusions.only_on(["sqlite"])
    @property
    def fk_names(self):
        """foreign key constraints always have names in the DB"""
        return exclusions.fails_on("sqlite")
    @property
    def reflects_fk_options(self):
        return exclusions.open()
    @property
    def fk_initially(self):
        """backend supports INITIALLY option in foreign keys"""
        return exclusions.only_on(["postgresql"])
    @property
    def fk_deferrable(self):
        """backend supports DEFERRABLE option in foreign keys"""
        return exclusions.only_on(["postgresql", "oracle"])
    @property
    def fk_deferrable_is_reflected(self):
        # DEFERRABLE is supported on oracle but not reported by reflection.
        return self.fk_deferrable + exclusions.fails_on("oracle")
    @property
    def fk_ondelete_restrict(self):
        return exclusions.only_on(["postgresql", "sqlite", "mysql"])
    @property
    def fk_onupdate_restrict(self):
        return self.fk_onupdate + exclusions.fails_on(["mssql"])
    @property
    def fk_ondelete_noaction(self):
        return exclusions.only_on(
            ["postgresql", "mysql", "mariadb", "sqlite", "mssql"]
        )
    @property
    def fk_ondelete_is_reflected(self):
        # mssql reflects ON DELETE only from SQLAlchemy 1.4.26 onward.
        def go(config):
            if exclusions.against(config, "mssql"):
                return not sqla_compat.sqla_14_26
            else:
                return False
        return exclusions.fails_if(go)
    @property
    def fk_onupdate_is_reflected(self):
        # mssql reflects ON UPDATE only from SQLAlchemy 1.4.26 onward.
        def go(config):
            if exclusions.against(config, "mssql"):
                return not sqla_compat.sqla_14_26
            else:
                return False
        return self.fk_onupdate + exclusions.fails_if(go)
    @property
    def fk_onupdate(self):
        return exclusions.only_on(
            ["postgresql", "mysql", "mariadb", "sqlite", "mssql"]
        )
    @property
    def reflects_unique_constraints_unambiguously(self):
        return exclusions.fails_on(["mysql", "mariadb", "oracle"])
    @property
    def reflects_indexes_w_sorting(self):
        # TODO: figure out what's happening on the SQLAlchemy side
        # when we reflect an index that has asc() / desc() on the column
        return exclusions.fails_on(["oracle"])
    @property
    def long_names(self):
        if sqla_compat.sqla_14:
            return exclusions.skip_if("oracle<18")
        else:
            return exclusions.skip_if("oracle")
    @property
    def reflects_pk_names(self):
        """Target driver reflects the name of primary key constraints."""
        return exclusions.fails_on_everything_except(
            "postgresql", "oracle", "mssql", "sybase", "sqlite"
        )
    @property
    def datetime_timezone(self):
        """target dialect supports timezone with datetime types."""
        return exclusions.only_on(["postgresql"])
    @property
    def postgresql(self):
        return exclusions.only_on(["postgresql"])
    @property
    def mysql(self):
        return exclusions.only_on(["mysql", "mariadb"])
    @property
    def oracle(self):
        return exclusions.only_on(["oracle"])
    @property
    def mssql(self):
        return exclusions.only_on(["mssql"])
@property
def postgresql_uuid_ossp(self):
def check_uuid_ossp(config):
if not exclusions.against(config, "postgresql"):
return False
try:
config.db.execute("SELECT uuid_generate_v4()")
return True
except:
return False
return exclusions.only_if(check_uuid_ossp)
def _has_pg_extension(self, name):
def check(config):
if not exclusions.against(config, "postgresql"):
return False
with config.db.connect() as conn:
count = conn.scalar(
text(
"SELECT count(*) FROM pg_extension "
"WHERE extname='%s'" % name
)
)
return bool(count)
return exclusions.only_if(check, "needs %s extension" % name)
@property
def hstore(self):
return self._has_pg_extension("hstore")
@property
def btree_gist(self):
return self._has_pg_extension("btree_gist")
@property
def autoincrement_on_composite_pk(self):
return exclusions.skip_if(["sqlite"], "not supported by database")
@property
def integer_subtype_comparisons(self):
"""if a compare of Integer and BigInteger is supported yet."""
return exclusions.skip_if(["oracle"], "not supported by alembic impl")
@property
def autocommit_isolation(self):
"""target database should support 'AUTOCOMMIT' isolation level"""
return exclusions.only_on(["postgresql", "mysql", "mariadb"])
@property
def computed_columns(self):
# TODO: in theory if these could come from SQLAlchemy dialects
# that would be helpful
return self.computed_columns_api + exclusions.skip_if(
["postgresql < 12", "sqlite < 3.31", "mysql < 5.7"]
)
@property
def computed_reflects_as_server_default(self):
# note that this rule will go away when SQLAlchemy correctly
# supports reflection of the "computed" construct; the element
# will consistently be present as both column.computed and
# column.server_default for all supported backends.
return (
self.computed_columns
+ exclusions.only_if(
["postgresql", "oracle"],
"backend reflects computed construct as a server default",
)
+ exclusions.skip_if(self.computed_reflects_normally)
)
@property
def computed_doesnt_reflect_as_server_default(self):
# note that this rule will go away when SQLAlchemy correctly
# supports reflection of the "computed" construct; the element
# will consistently be present as both column.computed and
# column.server_default for all supported backends.
return (
self.computed_columns
+ exclusions.skip_if(
["postgresql", "oracle"],
"backend reflects computed construct as a server default",
)
+ exclusions.skip_if(self.computed_reflects_normally)
)
@property
def check_constraint_reflection(self):
return exclusions.fails_on_everything_except(
"postgresql",
"sqlite",
"oracle",
self._mysql_and_check_constraints_exist,
)
def mysql_check_col_name_change(self, config):
# MySQL has check constraints that enforce an reflect, however
# they prevent a column's name from being changed due to a bug in
# MariaDB 10.2 as well as MySQL 8.0.16
if exclusions.against(config, ["mysql", "mariadb"]):
if sqla_compat._is_mariadb(config.db.dialect):
mnvi = sqla_compat._mariadb_normalized_version_info
norm_version_info = mnvi(config.db.dialect)
return norm_version_info >= (10, 2) and norm_version_info < (
10,
2,
22,
)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return True
def _mysql_and_check_constraints_exist(self, config):
# 1. we have mysql / mariadb and
# 2. it enforces check constraints
if exclusions.against(config, ["mysql", "mariadb"]):
if sqla_compat._is_mariadb(config.db.dialect):
mnvi = sqla_compat._mariadb_normalized_version_info
norm_version_info = mnvi(config.db.dialect)
return norm_version_info >= (10, 2)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return False
@property
def json_type(self):
return exclusions.only_on(
[
lambda config: exclusions.against(config, "mysql")
and (
(
not config.db.dialect._is_mariadb
and exclusions.against(config, "mysql >= 5.7")
)
or (
config.db.dialect._mariadb_normalized_version_info
>= (10, 2, 7)
)
),
"mariadb>=10.2.7",
"postgresql >= 9.3",
self._sqlite_json,
self._mssql_json,
]
)
def _mssql_json(self, config):
if not sqla_compat.sqla_14:
return False
else:
return exclusions.against(config, "mssql")
def _sqlite_json(self, config):
if not sqla_compat.sqla_14:
return False
elif not exclusions.against(config, "sqlite >= 3.9"):
return False
else:
with config.db.connect() as conn:
try:
return (
conn.execute(
text(
"""select json_extract('{"foo": "bar"}', """
"""'$."foo"')"""
)
).scalar()
== "bar"
)
except sqla_exc.DBAPIError:
return False
@property
def identity_columns(self):
# TODO: in theory if these could come from SQLAlchemy dialects
# that would be helpful
return self.identity_columns_api + exclusions.only_on(
["postgresql >= 10", "oracle >= 12", "mssql"]
)
@property
def identity_columns_alter(self):
# TODO: in theory if these could come from SQLAlchemy dialects
# that would be helpful
return self.identity_columns_api + exclusions.only_on(
["postgresql >= 10", "oracle >= 12"]
)
@property
def supports_identity_on_null(self):
return self.identity_columns + exclusions.only_on(["oracle"])
@property
def legacy_engine(self):
return exclusions.only_if(
lambda config: not getattr(config.db, "_is_future", False)
)
@property
def stubs_test(self):
def requirements():
try:
import black # noqa
import zimports # noqa
return False
except Exception:
return True
imports = exclusions.skip_if(
requirements, "black and zimports are required for this test"
)
version = exclusions.only_if(
lambda _: compat.py39, "python 3.9 is required"
)
return imports + version
|
|
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import six
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Manila specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range M3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the M3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to manila/tests/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
translated_log = re.compile(
r"(.)*LOG\."
r"(audit|debug|error|info|warn|warning|critical|exception)"
r"\("
r"(_|_LE|_LI|_LW)"
r"\(")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _$")
underscore_import_check_multi = re.compile(r"(.)*import (.)*_, (.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
assert_no_xrange_re = re.compile(r"\s*xrange\s*\(")
assert_True = re.compile(r".*assertEqual\(True, .*\)")
no_log_warn = re.compile(r"\s*LOG.warn\(.*")
class BaseASTChecker(ast.NodeVisitor):
    """Provides a simple framework for writing AST-based checks.
    Subclasses should implement visit_* methods like any other AST visitor
    implementation. When they detect an error for a particular node the
    method should call ``self.add_error(offending_node)``. Details about
    where in the code the error occurred will be pulled from the node
    object.
    Subclasses should also provide a class variable named CHECK_DESC to
    be used for the human readable error message.
    """
    CHECK_DESC = 'No check message specified'
    def __init__(self, tree, filename):
        """This object is created automatically by pep8.
        :param tree: an AST tree
        :param filename: name of the file being analyzed
                         (ignored by our checks)
        """
        self._tree = tree
        self._errors = []
    def run(self):
        """Called automatically by pep8; returns the collected errors."""
        self.visit(self._tree)
        return self._errors
    def add_error(self, node, message=None):
        """Add an error caused by a node to the list of errors for pep8."""
        message = message or self.CHECK_DESC
        error = (node.lineno, node.col_offset, message, self.__class__)
        self._errors.append(error)
    def _check_call_names(self, call_node, names):
        """Return True when *call_node* is a direct call to one of *names*.
        Fix: previously returned an implicit ``None`` for non-Call nodes;
        now consistently returns a bool (falsy either way, so callers
        using boolean context are unaffected).
        """
        if isinstance(call_node, ast.Call):
            if isinstance(call_node.func, ast.Name):
                if call_node.func.id in names:
                    return True
        return False
def no_translate_logs(logical_line):
    """M359 - log messages must not be passed through a translation call."""
    match = translated_log.match(logical_line)
    if match is not None:
        yield (0, "M359 Don't translate log messages!")
class CheckLoggingFormatArgs(BaseASTChecker):
    """Check for improper use of logging format arguments.
    LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.",
              ('volume1', 500))
    The format arguments should not be a tuple as it is easy to miss.
    """
    CHECK_DESC = 'M310 Log method arguments should not be a tuple.'
    # method names treated as log helpers on a LOG object
    LOG_METHODS = [
        'debug', 'info',
        'warn', 'warning',
        'error', 'exception',
        'critical', 'fatal',
        'trace', 'log'
    ]
    def _find_name(self, node):
        """Return the fully qualified name or a Name or Attribute."""
        if isinstance(node, ast.Name):
            return node.id
        elif (isinstance(node, ast.Attribute)
                and isinstance(node.value, (ast.Name, ast.Attribute))):
            method_name = node.attr
            obj_name = self._find_name(node.value)
            if obj_name is None:
                return None
            return obj_name + '.' + method_name
        elif isinstance(node, six.string_types):
            return node
        else:  # could be Subscript, Call or many more
            return None
    def visit_Call(self, node):
        """Look for the 'LOG.*' calls."""
        # extract the obj_name and method_name
        if isinstance(node.func, ast.Attribute):
            obj_name = self._find_name(node.func.value)
            if isinstance(node.func.value, ast.Name):
                method_name = node.func.attr
            elif isinstance(node.func.value, ast.Attribute):
                obj_name = self._find_name(node.func.value)
                method_name = node.func.attr
            else:  # could be Subscript, Call or many more
                return super(CheckLoggingFormatArgs, self).generic_visit(node)
            # obj must be a logger instance and method must be a log helper
            if (obj_name != 'LOG'
                    or method_name not in self.LOG_METHODS):
                return super(CheckLoggingFormatArgs, self).generic_visit(node)
            # the call must have arguments
            if not len(node.args):
                return super(CheckLoggingFormatArgs, self).generic_visit(node)
            # any argument should not be a tuple
            for arg in node.args:
                if isinstance(arg, ast.Tuple):
                    self.add_error(arg)
        return super(CheckLoggingFormatArgs, self).generic_visit(node)
def check_explicit_underscore_import(logical_line, filename):
    """Check for explicit import of the _ function
    We need to ensure that any files that are using the _() function
    to translate logs are explicitly importing the _ function. We
    can't trust unit test to catch whether the import has been
    added so we need to check for it here.
    """
    # Once a file is known to import _, no further checking is needed.
    if filename in UNDERSCORE_IMPORT_FILES:
        return
    if (underscore_import_check.match(logical_line) or
            underscore_import_check_multi.match(logical_line) or
            custom_underscore_check.match(logical_line)):
        UNDERSCORE_IMPORT_FILES.append(filename)
    elif string_translation.match(logical_line):
        yield (0, "M323: Found use of _() without explicit import of _ !")
class CheckForStrUnicodeExc(BaseASTChecker):
    """Checks for the use of str() or unicode() on an exception.
    This currently only handles the case where str() or unicode()
    is used in the scope of an exception handler. If the exception
    is passed into a function, returned from an assertRaises, or
    used on an exception created in the same scope, this does not
    catch it.
    """
    CHECK_DESC = ('M325 str() and unicode() cannot be used on an '
                  'exception. Remove or use six.text_type()')
    def __init__(self, tree, filename):
        super(CheckForStrUnicodeExc, self).__init__(tree, filename)
        # stack of exception-variable names currently in scope
        self.name = []
        # nodes already reported, to avoid duplicate errors
        self.already_checked = []
    # Python 2
    def visit_TryExcept(self, node):
        # py2 ast: handler.name is an ast.Name node, hence .id
        for handler in node.handlers:
            if handler.name:
                self.name.append(handler.name.id)
                super(CheckForStrUnicodeExc, self).generic_visit(node)
                self.name = self.name[:-1]
            else:
                super(CheckForStrUnicodeExc, self).generic_visit(node)
    # Python 3
    def visit_ExceptHandler(self, node):
        # py3 ast: handler name is a plain string
        if node.name:
            self.name.append(node.name)
            super(CheckForStrUnicodeExc, self).generic_visit(node)
            self.name = self.name[:-1]
        else:
            super(CheckForStrUnicodeExc, self).generic_visit(node)
    def visit_Call(self, node):
        # flag str(e)/unicode(e) where e is an in-scope exception variable
        if self._check_call_names(node, ['str', 'unicode']):
            if node not in self.already_checked:
                self.already_checked.append(node)
                if isinstance(node.args[0], ast.Name):
                    if node.args[0].id in self.name:
                        self.add_error(node.args[0])
        super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.
    Translations should not be concatenated with other strings, but
    should instead include the string being added to the translated
    string to give the translators the most information.
    """
    CHECK_DESC = ('M326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')
    TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
    def visit_BinOp(self, node):
        """Flag ``_('...') + other`` and ``other + _('...')``."""
        if isinstance(node.op, ast.Add):
            # report at most one side (left takes precedence), matching a
            # plain left-then-right if/elif check
            for operand in (node.left, node.right):
                if self._check_call_names(operand, self.TRANS_FUNC):
                    self.add_error(operand)
                    break
        super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, physical_line, filename):
    """M333 - require oslo_* imports instead of oslo.* namespace imports."""
    if pep8.noqa(physical_line):
        return
    if oslo_namespace_imports.match(logical_line):
        fixed_line = logical_line.replace('oslo.', 'oslo_')
        yield (0, "M333: '%s' must be used instead of '%s'." % (fixed_line,
                                                                logical_line))
def dict_constructor_with_list_copy(logical_line):
    """M336 - prefer dict comprehensions over dict(<sequence of pairs>)."""
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, "M336: Must use a dict comprehension instead of a dict"
                  " constructor with a sequence of key-value pairs.")
def no_xrange(logical_line):
    """M337 - ban use of the py2-only xrange() builtin."""
    match = assert_no_xrange_re.match(logical_line)
    if match:
        yield (0, "M337: Do not use xrange().")
def validate_assertTrue(logical_line):
    """M313 - prefer assertTrue(value) over assertEqual(True, value)."""
    if assert_True.match(logical_line):
        yield (0, "M313: Unit tests should use assertTrue(value) instead"
                  " of using assertEqual(True, value).")
def check_uuid4(logical_line):
    """Generating UUID
    Use oslo_utils.uuidutils to generate UUID instead of uuid4().
    M354
    """
    # attribute access such as uuid4().hex is allowed
    if "uuid4()." in logical_line:
        return
    if "uuid4()" in logical_line:
        yield (0, "M354: Use oslo_utils.uuidutils to generate UUID instead "
                  "of uuid4().")
def no_log_warn_check(logical_line):
    """Disallow 'LOG.warn'
    Deprecated LOG.warn(), instead use LOG.warning
    ://bugs.launchpad.net/manila/+bug/1508442
    M338
    """
    if no_log_warn.match(logical_line):
        yield (0, "M338: LOG.warn is deprecated, use LOG.warning.")
def factory(register):
    """pep8/hacking entry point: register every Manila-specific check."""
    checks = (
        check_explicit_underscore_import,
        no_translate_logs,
        CheckForStrUnicodeExc,
        CheckLoggingFormatArgs,
        CheckForTransAdd,
        check_oslo_namespace_imports,
        dict_constructor_with_list_copy,
        no_xrange,
        validate_assertTrue,
        check_uuid4,
        no_log_warn_check,
    )
    for check in checks:
        register(check)
|
|
#!/usr/bin/python
#
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
import logging
import optparse
import ConfigParser
import json
import urllib2
import git
import shutil
import zipfile
import tarfile
import datetime
import ast
import re
import getpass
try:
import pysvn
except ImportError:
# pysvn is not friendly, no pip or easy_install option.
# If import fails, we don't have svn support.
pass
class tSvn:
    """Wrapper around pysvn for exporting config artifacts.

    If pysvn could not be imported at module load time, construction
    logs an error and the instance ends up without a ``client``
    attribute (svn operations will then fail if attempted).
    """
    def __init__(self, url=None):
        self.url = url
        # Set up our client object
        try:
            self.client = pysvn.Client()
            self.client.callback_ssl_server_trust_prompt = (
                self.ssl_server_trust_prompt)
            self.client.set_default_username(svn_user)
            self.client.set_default_password(svn_pass)
        except NameError:
            # fix: message previously misspelled the module as 'pysn'
            logging.error('pysvn module is absent, no svn support')
    def ssl_server_trust_prompt(self, trust_dict):
        # we know what we're connecting to, no need to validate
        return (True, 0, True)
    def dl_artifact_svn_conf(self, tmp_dir=None, location=None, revision=None):
        """Export *location* at *revision* (or HEAD) into *tmp_dir*."""
        logging.info('Downloading revision: %s '
                     'artifact: %s'
                     % (revision,
                        location))
        logging.debug('Downloading to dir: %s' % tmp_dir)
        if revision == 'HEAD':
            # export the config
            self.client.export(location,
                               tmp_dir,
                               force=True,
                               revision=pysvn.Revision(
                                   pysvn.opt_revision_kind.head))
        else:
            # export the config
            self.client.export(location,
                               tmp_dir,
                               force=True,
                               revision=pysvn.Revision(
                                   pysvn.opt_revision_kind.number, revision))
class tFacter:
    """Run the ``facter`` command once and expose its facts as a dict."""
    def __init__(self):
        # need this for ct_*
        os.environ["FACTERLIB"] = "/var/lib/puppet/lib/facter:/app/twonicorn/conf/facter"
        proc = subprocess.Popen(['facter'], stdout=subprocess.PIPE)
        proc.wait()
        raw_lines = proc.stdout.readlines()
        # keep only "key => value" lines; strip removes the trailing \n
        pairs = [line.strip() for line in raw_lines if ' => ' in line]
        self.facts = dict(pair.split(' => ') for pair in pairs)
    def get_fact(self, fact):
        """Return the recorded value for *fact* (KeyError when absent)."""
        return self.facts[fact]
class tGit:
    """Helper for fetching config artifacts out of a git repository."""
    def __init__(self, url=None):
        self.url = url
        # Need this because our cert is lame
        os.environ['GIT_SSL_NO_VERIFY'] = 'true'
    def dl_artifact_git_conf(self, tmp_dir, location, revision):
        """Clone *location* into *tmp_dir* and hard-reset it to *revision*."""
        logging.info('Downloading revision: %s '
                     'artifact: %s' % (revision, location))
        # check out the repo, then point HEAD at the wanted revision and
        # force the index/working tree to match it
        repo = git.Repo.clone_from(location, tmp_dir)
        repo.head.reference = revision
        repo.head.reset(index=True, working_tree=True)
def get_application_deploys(tcw_host, application_id, ct_env, ct_loc):
    """Query the twonicorn API for the current deploys of an application.

    :param tcw_host: hostname of the twonicorn web API
    :param application_id: application id in the twonicorn DB
    :param ct_env: value of the 'ct_env' facter fact
    :param ct_loc: value of the 'ct_loc' facter fact
    Returns a dict keyed by deploy_id. Exits the process when the API
    returns no data (exit 2) or the data is missing keys (exit 1).
    Relies on the module-global ``api_protocol`` assigned in main().
    """
    # Fetch the list of deploys for the application
    # This becomes the api call
    api_url = (api_protocol
               + '://'
               + tcw_host
               + '/api/application?'
               + 'id='
               + application_id
               + '&env='
               + ct_env
               + '&loc='
               + ct_loc
               + '&lifecycle=current')
    logging.info('Requesting data from api: %s' % api_url)
    response = urllib2.urlopen(api_url)
    deployment_data = json.load(response)
    if not deployment_data:
        logging.error('No deployment data found for '
                      'application_id: %s. Aborting!'
                      % application_id)
        logging.info('twoni-plete')
        print ""
        sys.exit(2)
    logging.debug('Deployment Data: %s' % deployment_data)
    deployments = {}
    for index in range(len(deployment_data)):
        deployment = deployment_data[index]
        try:
            deployments[deployment['deploy_id']] = {
                'deploy_id': deployment['deploy_id'],
                'package_name': deployment['package_name'],
                'artifact_assignment_id': deployment['artifact_assignment_id'],
                'deploy_path': deployment['deploy_path'],
                'download_url': deployment['download_url'],
                'revision': deployment['revision'],
                'artifact_type': deployment['artifact_type'],
                'repo_type': deployment['repo_type'],
                'repo_name': deployment['repo_name']
            }
        except KeyError:
            logging.error('Deployment data is incomplete. Did you forget to promote an artifact?')
            sys.exit(1)
        logging.debug('deploy_id=%s,artifact_assignment_id=%s,'
                      'deploy_path=%s,download_url=%s,'
                      'revision=%s,artifact_type=%s,repo_type=%s,repo_name=%s'
                      % (deployment['deploy_id'],
                         deployment['artifact_assignment_id'],
                         deployment['deploy_path'],
                         deployment['download_url'],
                         deployment['revision'],
                         deployment['artifact_type'],
                         deployment['repo_type'],
                         deployment['repo_name']))
    return deployments
def parse_db_deployments(deployments):
    """Reduce a deployments dict to {deploy_id: artifact_assignment_id}."""
    return dict((deploy_id, info['artifact_assignment_id'])
                for deploy_id, info in deployments.items())
def check_manifest_exists(manifest_file):
    """Ensure the manifest file exists, creating an empty one if missing.

    Returns True when the file was already present, False when it had
    to be created.
    """
    logging.info('Checking for existence of manifest file: %s'
                 % manifest_file)
    if os.path.isfile(manifest_file):
        logging.debug('Manifest file exists: %s'
                      % manifest_file)
        return True
    logging.warn('Manifest file %s does not exist, creating'
                 % manifest_file)
    open(manifest_file, 'a').close()
    logging.info('Manifest file %s created'
                 % manifest_file)
    return False
def check_manifest_empty(manifest_file):
    """Return True when the manifest has content, False when it is empty."""
    logging.info('Checking if manifest file is empty: %s'
                 % manifest_file)
    # index 6 of os.stat() is st_size
    if os.stat(manifest_file)[6]:
        logging.debug('Manifest file %s is not empty'
                      % manifest_file)
        return True
    logging.warn('Manifest file %s is empty. Installing the current '
                 'version of all artifacts.'
                 % manifest_file)
    return False
def check_deployment_dirs(deployments):
    """Ensure every deploy_path exists, creating missing ones via sudo.

    Newly created directories are chown'd to the current user. Returns
    True when every path already existed, False when any was created.
    """
    logging.info('Checking for existence of deployment dirs')
    retval = True
    for k in deployments.keys():
        logging.debug('Checking for existence of path: %s' % deployments[k]['deploy_path'])
        if not os.path.isdir(deployments[k]['deploy_path']):
            logging.warn('Path does not exist: %s Creating...' % deployments[k]['deploy_path'])
            retval = False
            subprocess.check_call(["/usr/bin/sudo",
                                   "/bin/mkdir",
                                   "-p",
                                   deployments[k]['deploy_path']])
            user = getpass.getuser()
            logging.warn('Changing permissions of %s to user: %s' % (deployments[k]['deploy_path'],user))
            subprocess.check_call(["/usr/bin/sudo",
                                   "/bin/chown",
                                   "-R",
                                   '{0}:'.format(user),
                                   deployments[k]['deploy_path']])
        else:
            logging.debug('Path exists: %s' % deployments[k]['deploy_path'])
    return retval
def check_manifest(deployments, manifest_file):
    """Compare the last manifest entry against the DB deployment state.

    Returns the list of deploy ids whose installed artifact assignment
    is missing from, or differs from, the last line of the manifest.
    """
    deploys_todo = []
    logging.debug('Comparing versions in manifest file : %s'
                  % manifest_file)
    # read the last line
    with open(manifest_file, "r") as file_handle:
        last_line = file_handle.readlines()[-1]
    (inst_date,
     inst_time,
     inst_deploys) = last_line.split(' ', 2)
    # convert the string to a dict for comparison.
    inst_deploys = ast.literal_eval(inst_deploys)
    logging.debug('Installed deployment:assignments: %s'
                  % inst_deploys)
    db_deployments = parse_db_deployments(deployments)
    logging.debug('DB deployment:assignments: %s'
                  % db_deployments)
    # Check to see if we have to do anything
    logging.info('Checking to see what we need to do...')
    # first check if all the keys we want to install are there
    for key in deployments.keys():
        if key not in inst_deploys:
            # add the deploy to the list of things to do.
            logging.debug('Deploy id %s is missing from the '
                          'manifest. Adding to list of deploys '
                          'to install.'
                          % key)
            deploys_todo.append(key)
            continue
        logging.debug('Deploy %s is in the manifest.'
                      % key)
        # check the value to make sure it's the same
        if (deployments[key]['artifact_assignment_id'] ==
                inst_deploys[key]):
            logging.debug('Artifact assignment is the same '
                          'for deploy id %s in both sources.'
                          % key)
        else:
            # add the deploy to the list of things to do.
            logging.debug('Artifact assignment is different '
                          'for deploy id %s. Adding to list of'
                          ' deploys to install.'
                          % key)
            deploys_todo.append(key)
    if deploys_todo:
        logging.info('We are going to upgrade the following '
                     'deploys : %s'
                     % deploys_todo)
    else:
        logging.info('Installed deployment:assignments '
                     'match the DB. Nothing to do.')
    return deploys_todo
def clean_dir(tmp_dir=None):
    """Recursively remove *tmp_dir* when it exists; otherwise do nothing."""
    if not os.path.isdir(tmp_dir):
        return
    logging.debug('Removing tmp dir : %s' % tmp_dir)
    shutil.rmtree(tmp_dir)
def dl_artifact_http(tmp_dir=None, download_url=None, revision=None):
    """Download an artifact into *tmp_dir* via curl.

    Uses the module globals ``verify_ssl`` and ``ca_bundle_file`` set in
    main(); when verify_ssl is false curl is invoked with -k. A failed
    download is logged but not re-raised.
    """
    logging.info('Downloading revision: %s artifact: %s'
                 % (revision,
                    download_url))
    # last path segment of the URL is the local file name
    artifact = download_url.rsplit('/', 1)
    artifact = artifact[1]
    if not os.path.exists(tmp_dir):
        logging.debug('Creating dir: %s' % tmp_dir)
        os.makedirs(tmp_dir)
    logging.debug('Downloading to dir: %s' % tmp_dir)
    try:
        if verify_ssl:
            subprocess.check_call(["curl",
                                   "-s",
                                   "--cacert",
                                   ca_bundle_file,
                                   "-o",
                                   tmp_dir + '/' + artifact,
                                   download_url])
        else:
            logging.warn('ssl cert check disabled for download URL: %s' % download_url)
            subprocess.check_call(["curl",
                                   "-s",
                                   "-k",
                                   "-o",
                                   tmp_dir + '/' + artifact,
                                   download_url])
    except Exception, e:
        logging.error('Artifact download failed: %s' % e)
def get_py_version(location, package_name):
    """Derive the version string from a python sdist file name.

    e.g. location 'mypkg-1.2.3.tar.gz' with package_name 'mypkg'
    returns '1.2.3'.

    Fix: the package name is regex-escaped (dots in a package name were
    previously treated as wildcards) and the '.tar.gz' suffix is only
    stripped at the end of the string, with its dots escaped.
    """
    version = re.sub(re.escape(package_name) + '-', '', location)
    version = re.sub(r'\.tar\.gz$', '', version)
    return version
def install_py_package(pip, payload):
    """pip-install *payload* (e.g. 'pkg==1.2') then log a pip freeze."""
    logging.info('Installing package: %s' % payload)
    logging.info('Install command: %s install --pre -U %s' % (pip, payload))
    subprocess.check_call([pip, "install", "--pre", "-U", payload])
    logging.info('The following packages are installed:')
    subprocess.check_call([pip, "freeze"])
def create_py_virtualenv(deploy_path):
    """Create a fresh virtualenv rooted at *deploy_path*."""
    logging.info('Creating Virtualenv: %s' % deploy_path)
    subprocess.check_call(['virtualenv', deploy_path])
def download_artifacts(deployments, deploys_todo, tmp_dir, ct_env):
    """Fetch every deploy in *deploys_todo* into a per-deploy tmp dir.

    Dispatches on artifact_type/repo_type: war/tar/jar over http,
    'conf' from svn or git, and 'python' packages installed straight
    into a virtualenv at deploy_path.
    """
    # download everything first
    for t in deploys_todo:
        # Set our vars
        deploy_id = deployments[t]['deploy_id']
        package_name = deployments[t]['package_name']
        artifact_assignment_id = (
            deployments[t]['artifact_assignment_id'])
        deploy_path = deployments[t]['deploy_path']
        download_url = deployments[t]['download_url']
        revision = deployments[t]['revision']
        artifact_type = deployments[t]['artifact_type']
        repo_type = deployments[t]['repo_type']
        logging.debug('deploy_id=%s,artifact_assignment_id=%s,'
                      'deploy_path=%s,download_url=%s,'
                      'revision=%s,artifact_type=%s,repo_type=%s'
                      % (deploy_id,
                         artifact_assignment_id,
                         deploy_path,
                         download_url,
                         revision,
                         artifact_type,
                         repo_type))
        deploy_id = str(deploy_id)
        # Add the deploy id to the tmp dir
        tmp_dir_id = tmp_dir + '/' + deploy_id
        # Clean the deploy dir first
        clean_dir(tmp_dir_id)
        # download artifacts
        if artifact_type == 'war' or artifact_type == 'tar' or artifact_type == 'jar':
            logging.debug('artifact_type is %s.' % artifact_type)
            if repo_type == 'http':
                dl_artifact_http(tmp_dir_id, download_url, revision)
                if artifact_type == 'jar':
                    # Rename the jar to the expected package name
                    artifact = download_url.rsplit('/', 1)
                    artifact = artifact[1]
                    logging.info('Renaming %s file %s to %s' % (artifact_type, tmp_dir_id + '/' + artifact, tmp_dir_id + '/' + package_name))
                    subprocess.check_call(['mv',
                                           tmp_dir_id + '/' + artifact,
                                           tmp_dir_id + '/' + package_name])
        elif artifact_type == 'conf':
            logging.debug('artifact_type is conf')
            if repo_type == 'svn':
                # Append ct_env so that source and destination are
                # env specific
                download_url = download_url + '/' + ct_env
                tmp_dir_id = tmp_dir_id + '/' + ct_env
                t_svn = tSvn(download_url)
                t_svn.dl_artifact_svn_conf(tmp_dir_id,
                                           download_url,
                                           revision)
            elif repo_type == 'git':
                t_git = tGit(download_url)
                t_git.dl_artifact_git_conf(tmp_dir_id,
                                           download_url,
                                           revision)
        elif artifact_type == 'python':
            artifact = download_url.rsplit('/', 1)
            logging.info('Package Name is: %s' % package_name)
            logging.info('Artifact is: %s' % artifact[1])
            version = get_py_version(artifact[1], package_name)
            logging.info('Version is: %s' % version)
            pip = deploy_path + '/bin/pip'
            payload = package_name + '==' + version
            # create the virtualenv on first install
            if os.path.isfile(pip):
                install_py_package(pip, payload)
            else:
                create_py_virtualenv(deploy_path)
                install_py_package(pip, payload)
def unzip(source_filename=None, dest_dir=None):
    """Extract all members of the zip archive into *dest_dir*.

    Fix: the ZipFile is now always closed (the old code leaked the
    open file handle, and held it even when extractall raised).
    """
    zipper = zipfile.ZipFile(source_filename)
    try:
        zipper.extractall(dest_dir)
    finally:
        zipper.close()
def untar(source_filename=None, dest_dir=None):
    """Extract all members of the gzipped tar archive into *dest_dir*.

    Fix: the tarfile is now always closed (the old code leaked the
    open file handle, and held it even when extractall raised).
    """
    tar_file = tarfile.open(source_filename, 'r:gz')
    try:
        tar_file.extractall(dest_dir)
    finally:
        tar_file.close()
def sync_artifact_war(tmp_dir_id, deploy_path, artifact_file):
    """Explode a downloaded war into <tmp>/current and rsync it out.

    Uses rsync --delete, so files removed from the artifact disappear
    from *deploy_path*.
    """
    tmp_artifact_path_current = tmp_dir_id + '/current'
    # explode the war
    logging.debug('Expanding artifact : %s in %s'
                  % (artifact_file,
                     tmp_artifact_path_current))
    unzip(tmp_dir_id + '/' + artifact_file, tmp_artifact_path_current)
    # rsync it
    logging.debug('Rsyncing %s to %s'
                  % (tmp_artifact_path_current,
                     deploy_path))
    # TODO: need to ensure no trailing / ?
    subprocess.check_call(["rsync",
                           "-ra",
                           "--delete",
                           tmp_artifact_path_current,
                           deploy_path])
def sync_artifact_tar(tmp_dir_id, deploy_path, artifact_file):
    """Explode a downloaded tarball into <tmp>/current and rsync each
    top-level entry into *deploy_path* with --delete."""
    # explode the tar
    tmp_artifact_path_current = tmp_dir_id + '/current'
    logging.debug('Expanding artifact : %s in %s'
                  % (artifact_file,
                     tmp_artifact_path_current))
    untar(tmp_dir_id + '/' + artifact_file, tmp_artifact_path_current)
    # FIXME: needs checking for files vs. dirs
    dirs = os.listdir(tmp_artifact_path_current)
    # rsync it
    for d in dirs:
        logging.debug('Rsyncing %s to %s'
                      % (tmp_artifact_path_current + '/' + d,
                         deploy_path))
        subprocess.check_call(["rsync",
                               "-ra",
                               "--delete",
                               tmp_artifact_path_current + '/' + d,
                               deploy_path])
def sync_artifact_jar(tmp_dir_id, deploy_path, artifact_file):
    """Rsync the downloaded jar directory into *deploy_path* (--delete).

    *artifact_file* is accepted for signature parity with the other
    sync helpers but is not used here.
    """
    source = tmp_dir_id + '/'
    logging.debug('Rsyncing %s to %s'
                  % (tmp_dir_id,
                     deploy_path))
    subprocess.check_call(["rsync", "-ra", "--delete", source, deploy_path])
def sync_artifact_conf(tmp_dir_id=None, deploy_path=None):
    """Rsync exported config files into *deploy_path* (no --delete)."""
    source = tmp_dir_id + '/'
    logging.debug('Rsyncing %s to %s' % (tmp_dir_id, deploy_path))
    subprocess.check_call(["rsync", "-ra", source, deploy_path])
def sync_artifacts(deployments, deploys_todo, tmp_dir, ct_env):
    """Install previously downloaded artifacts into their deploy paths.

    Dispatches each deploy in *deploys_todo* to the sync helper that
    matches its artifact_type ('python' artifacts were already
    installed during download and need no sync step here).
    """
    # sync artifacts next
    for s in deploys_todo:
        # Set our vars
        deploy_id = deployments[s]['deploy_id']
        artifact_assignment_id = (
            deployments[s]['artifact_assignment_id'])
        deploy_path = deployments[s]['deploy_path']
        revision = deployments[s]['revision']
        artifact_type = deployments[s]['artifact_type']
        repo_type = deployments[s]['repo_type']
        download_url = deployments[s]['download_url']
        items = download_url.rsplit('/', 1)
        artifact_file = items[1]
        logging.info('Syncing deployment to %s' % deploy_path)
        logging.debug('deploy_id=%s,artifact_assignment_id=%s,'
                      'deploy_path=%s,revision=%s,'
                      'artifact_type=%s,repo_type=%s,'
                      'artifact_file=%s'
                      % (deploy_id,
                         artifact_assignment_id,
                         deploy_path,
                         revision,
                         artifact_type,
                         repo_type,
                         artifact_file))
        deploy_id = str(deploy_id)
        # Add the deploy id to the tmp dir
        tmp_dir_id = tmp_dir + '/' + deploy_id
        # do the sync artifacts
        if artifact_type == 'war':
            logging.debug('artifact_type is war.')
            sync_artifact_war(tmp_dir_id, deploy_path, artifact_file)
        elif artifact_type == 'tar':
            logging.debug('artifact_type is tar.')
            sync_artifact_tar(tmp_dir_id, deploy_path, artifact_file)
        elif artifact_type == 'jar':
            logging.debug('artifact_type is jar.')
            sync_artifact_jar(tmp_dir_id, deploy_path, artifact_file)
        elif artifact_type == 'conf':
            logging.debug('artifact_type is conf.')
            # conf artifacts were exported into an env-specific subdir
            tmp_dir_id = tmp_dir_id + '/' + ct_env
            sync_artifact_conf(tmp_dir_id, deploy_path)
def update_manifest(deployments, manifest_file):
    """Append a timestamped record of the installed deployments to the manifest."""
    logging.debug('Updating the manifest file')
    db_deployments = parse_db_deployments(deployments)
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open(manifest_file, "a") as fh:
        fh.write(stamp + " " + str(db_deployments) + "\n")
    logging.debug('Updated manifest file %s with application '
                  'assignments: %s'
                  % (manifest_file,
                     str(db_deployments)))
def main(argv):
parser = optparse.OptionParser(
description='Deploy configs and artifacts automagically.')
parser.add_option('--app-id',
'-i',
action='store',
type='string',
dest='application_id',
help='Application ID in twonicorn DB',
default=None)
parser.add_option('--config',
'-c',
action='store',
type='string',
dest='config_file',
help='Config file to use.',
default='/app/twonicorn/conf/twonicorn.conf')
parser.add_option('--secrets',
'-s',
action='store',
type='string',
dest='secrets_config_file',
help='Secret config file to use.',
default='/app/secrets/twonicorn-deploy.conf')
parser.add_option('--verbose',
'-v',
action='store_true',
dest='verbose',
help='Log debug messages to the log file',
default=None)
(options, args) = parser.parse_args()
# Make sure we have required options
required = ['application_id']
for r in required:
if not options.__dict__[r]:
print >> sys.stderr, \
"\nERROR - Required option is missing: %s\n" % r
parser.print_help()
sys.exit(2)
# Load facts
t_facts = tFacter()
ct_env = t_facts.get_fact('ct_env')
ct_loc = t_facts.get_fact('ct_loc')
# Get the config
config = ConfigParser.ConfigParser()
config.read(options.config_file)
secrets_config = ConfigParser.ConfigParser()
secrets_config.read(options.secrets_config_file)
# Globalizing these. Otherwise will be passing them all over the
# place for no reason.
global verify_ssl
global ca_bundle_file
global api_protocol
global svn_user
global svn_pass
api_protocol = config.get('main', 'tcw.api_protocol')
verify_ssl = bool(config.get('deploy', 'verify_ssl'))
ca_bundle_file = config.get('deploy', 'ca_bundle_file')
svn_user = config.get('main', 'tcw.svn_user')
svn_pass = secrets_config.get('main', 'tcw.svn_pass')
tcw_host = config.get('main', 'tcw.host')
manifest_dir = config.get('main', 'manifest.dir')
tmp_dir = config.get('main', 'tmp.dir')
log_file = config.get('deploy', 'log.file')
application_id = options.application_id
manifest_file = manifest_dir + '/application_' \
+ options.application_id + '.txt'
if options.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# Set up logging to file
logging.basicConfig(level=log_level,
format='%(asctime)s %(levelname)-8s- %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='a')
console = logging.StreamHandler()
console.setLevel(log_level)
formatter = logging.Formatter('%(levelname)-8s- %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info('Twonicorn START')
if options.verbose:
logging.info('Debug messages are being written to the log file : %s'
% log_file)
logging.info('Getting deployment information from twonicorn api : %s'
% tcw_host)
# Do all the things
deployments = get_application_deploys(tcw_host, application_id, ct_env, ct_loc)
deploys_todo = None
if not check_manifest_exists(manifest_file):
deploys_todo = deployments.keys()
if not check_manifest_empty(manifest_file):
deploys_todo = deployments.keys()
if not check_deployment_dirs(deployments):
logging.warn('At least one deployment dir is missing. Installing the current '
'version of all artifacts.')
deploys_todo = deployments.keys()
if not deploys_todo:
deploys_todo = check_manifest(deployments, manifest_file)
# We know what we have to do, so let's go
if deploys_todo:
download_artifacts(deployments, deploys_todo, tmp_dir, ct_env)
sync_artifacts(deployments, deploys_todo, tmp_dir, ct_env)
# update the manifest with everything from the db.
update_manifest(deployments, manifest_file)
logging.info('Twonicorn END')
print ""
# Script entry point: forward everything after the program name to main().
if __name__ == '__main__':
    main(sys.argv[1:])
|
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains functions for generating flux diagrams.
"""
import os.path
import re
import math
import numpy
import pydot
from rmgpy.solver.base import TerminationTime, TerminationConversion
from rmgpy.solver.liquid import LiquidReactor
from rmgpy.kinetics.diffusionLimited import diffusionLimiter
from rmgpy.rmg.settings import SimulatorSettings
from .loader import loadRMGJob
################################################################################
# Here you can set the default values for options that control the generated
# flux diagrams.
# NOTE: generateFluxDiagram() and simulate() declare several of these as
# `global` and overwrite them for the rest of the process when a
# user-supplied `settings` dict is given.

# Options controlling the individual flux diagram renderings:
program = 'dot'                   # The program to use to lay out the nodes and edges
maximumNodeCount = 50             # The maximum number of nodes to show in the diagram
maximumEdgeCount = 50             # The maximum number of edges to show in the diagram
concentrationTolerance = 1e-6     # The lowest fractional concentration to show (values below this will appear as zero)
speciesRateTolerance = 1e-6       # The lowest fractional species rate to show (values below this will appear as zero)
maximumNodePenWidth = 10.0        # The thickness of the border around a node at maximum concentration
maximumEdgePenWidth = 10.0        # The thickness of the edge at maximum species rate

# Options controlling the ODE simulations:
initialTime = 1e-12               # The time at which to initiate the simulation, in seconds
timeStep = 10**0.1                # The multiplicative factor to use between consecutive time points
absoluteTolerance = 1e-16         # The absolute tolerance to use in the ODE simulations
relativeTolerance = 1e-8          # The relative tolerance to use in the ODE simulations

# Options controlling the generated movie:
framesPerSecond = 6               # The number of frames per second in the generated movie
initialPadding = 5                # The number of seconds to display the initial fluxes at the start of the video
finalPadding = 5                  # The number of seconds to display the final fluxes at the end of the video
def generateFluxDiagram(reactionModel, times, concentrations, reactionRates, outputDirectory, centralSpecies=None, speciesDirectory=None, settings=None):
    """
    For a given `reactionModel` and simulation results stored as arrays of
    `times`, species `concentrations`, and `reactionRates`, generate a series
    of flux diagrams as frames of an animation, then stitch them together into
    a movie. The individual frames and the final movie are saved on disk at
    `outputDirectory.`

    Requires the layout `program` (Graphviz `dot` by default) and `ffmpeg`
    to be available on the PATH.  When `settings` is given, the module-level
    option globals are overwritten for the remainder of the process.
    """
    global maximumNodeCount, maximumEdgeCount, concentrationTolerance, speciesRateTolerance, maximumNodePenWidth, maximumEdgePenWidth
    # Allow user defined settings for flux diagram generation if given
    if settings:
        maximumNodeCount = settings.get('maximumNodeCount', maximumNodeCount)
        maximumEdgeCount = settings.get('maximumEdgeCount', maximumEdgeCount)
        concentrationTolerance = settings.get('concentrationTolerance', concentrationTolerance)
        speciesRateTolerance = settings.get('speciesRateTolerance', speciesRateTolerance)
        maximumNodePenWidth = settings.get('maximumNodePenWidth', maximumNodePenWidth)
        maximumEdgePenWidth= settings.get('maximumEdgePenWidth', maximumEdgePenWidth)

    # Get the species and reactions corresponding to the provided concentrations and reaction rates
    speciesList = reactionModel.core.species[:]
    numSpecies = len(speciesList)
    reactionList = reactionModel.core.reactions[:]
    numReactions = len(reactionList)

    #search index of central species:
    if centralSpecies is not None:
        # NOTE(review): if no species label matches `centralSpecies`,
        # centralSpeciesIndex is never bound and the central-species branch
        # below raises NameError -- confirm callers validate the label.
        for i, species in enumerate(speciesList):
            if species.label == centralSpecies:
                centralSpeciesIndex = i
                break

    # Compute the rates between each pair of species (big matrix warning!)
    # speciesRates[t, i, j] is the net rate from species i to species j at
    # time index t (antisymmetric in i, j by construction).
    speciesRates = numpy.zeros((len(times),numSpecies,numSpecies), numpy.float64)
    for index, reaction in enumerate(reactionList):
        rate = reactionRates[:,index]
        if not reaction.pairs: reaction.generatePairs()
        for reactant, product in reaction.pairs:
            reactantIndex = speciesList.index(reactant)
            productIndex = speciesList.index(product)
            speciesRates[:,reactantIndex,productIndex] += rate
            speciesRates[:,productIndex,reactantIndex] -= rate

    # Determine the maximum concentration for each species and the maximum overall concentration
    maxConcentrations = numpy.max(numpy.abs(concentrations), axis=0)
    maxConcentration = numpy.max(maxConcentrations)

    # Determine the maximum rate for each species-species pair and the maximum overall species-species rate
    maxSpeciesRates = numpy.max(numpy.abs(speciesRates), axis=0)
    maxSpeciesRate = numpy.max(maxSpeciesRates)
    # Flattened pair indices sorted by increasing max rate; iterated from
    # the end below so the fastest pairs are considered first.
    speciesIndex = maxSpeciesRates.reshape((numSpecies*numSpecies)).argsort()

    # Determine the nodes and edges to keep
    nodes = []; edges = []
    if centralSpecies is None:
        for i in range(numSpecies*numSpecies):
            productIndex, reactantIndex = divmod(speciesIndex[-i-1], numSpecies)
            if reactantIndex > productIndex:
                # Both reactant -> product and product -> reactant are in this list,
                # so only keep one of them
                continue
            if maxSpeciesRates[reactantIndex, productIndex] == 0:
                break
            if reactantIndex not in nodes and len(nodes) < maximumNodeCount: nodes.append(reactantIndex)
            if productIndex not in nodes and len(nodes) < maximumNodeCount: nodes.append(productIndex)
            if len(nodes) > maximumNodeCount:
                break
            edges.append([reactantIndex, productIndex])
            if len(edges) >= maximumEdgeCount:
                break
    else:
        # Only keep edges that touch the central species
        nodes.append(centralSpeciesIndex)
        for index, reaction in enumerate(reactionList):
            for reactant, product in reaction.pairs:
                reactantIndex = speciesList.index(reactant)
                productIndex = speciesList.index(product)
                if maxSpeciesRates[reactantIndex, productIndex] == 0:
                    break
                if len(nodes) > maximumNodeCount or len(edges) >= maximumEdgeCount:
                    break
                if reactantIndex == centralSpeciesIndex:
                    if productIndex not in nodes:
                        nodes.append(productIndex)
                    edges.append([reactantIndex, productIndex])
                if productIndex == centralSpeciesIndex:
                    if reactantIndex not in nodes:
                        nodes.append(reactantIndex)
                    edges.append([reactantIndex, productIndex])

    # Create the master graph
    # First we're going to generate the coordinates for all of the nodes; for
    # this we use the thickest pen widths for all nodes and edges
    graph = pydot.Dot('flux_diagram', graph_type='digraph', overlap="false")
    graph.set_rankdir('LR')
    graph.set_fontname('sans')
    graph.set_fontsize('10')

    # Add a node for each species
    for index in nodes:
        species = speciesList[index]
        node = pydot.Node(name=str(species))
        node.set_penwidth(maximumNodePenWidth)
        graph.add_node(node)
        # Try to use an image instead of the label
        # (note: `speciesIndex` is reused here as a filename suffix; the
        # argsort array above is no longer needed at this point)
        speciesIndex = str(species) + '.png'
        imagePath = ''
        if not speciesDirectory or not os.path.exists(speciesDirectory):
            continue
        for root, dirs, files in os.walk(speciesDirectory):
            for f in files:
                if f.endswith(speciesIndex):
                    imagePath = os.path.join(root, f)
                    break
        if os.path.exists(imagePath):
            node.set_image(imagePath)
            node.set_label(" ")

    # Add an edge for each species-species rate
    for reactantIndex, productIndex in edges:
        if reactantIndex in nodes and productIndex in nodes:
            reactant = speciesList[reactantIndex]
            product = speciesList[productIndex]
            edge = pydot.Edge(str(reactant), str(product))
            edge.set_penwidth(maximumEdgePenWidth)
            graph.add_edge(edge)

    # Generate the coordinates for all of the nodes using the specified program
    graph = pydot.graph_from_dot_data(graph.create_dot(prog=program))[0]

    # Now iterate over the time points, setting the pen widths appropriately
    # This should preserve the coordinates of the nodes from frame to frame
    frameNumber = 1
    for t in range(len(times)):
        # Update the nodes
        # Pen width scales logarithmically with fractional concentration
        slope = -maximumNodePenWidth / math.log10(concentrationTolerance)
        for index in nodes:
            species = speciesList[index]
            if re.search(r'^[a-zA-Z0-9_]*$',str(species)) is not None:
                species_string = str(species)
            else:
                # species name contains special characters
                species_string = '"{0}"'.format(str(species))
            node = graph.get_node(species_string)[0]
            concentration = concentrations[t,index] / maxConcentration
            if concentration < concentrationTolerance:
                penwidth = 0.0
            else:
                penwidth = round(slope * math.log10(concentration) + maximumNodePenWidth,3)
            node.set_penwidth(penwidth)
        # Update the edges
        slope = -maximumEdgePenWidth / math.log10(speciesRateTolerance)
        for index in range(len(edges)):
            reactantIndex, productIndex = edges[index]
            if reactantIndex in nodes and productIndex in nodes:
                reactant = speciesList[reactantIndex]
                product = speciesList[productIndex]
                if re.search(r'^[a-zA-Z0-9_]*$',str(reactant)) is not None:
                    reactant_string = str(reactant)
                else:
                    reactant_string = '"{0}"'.format(str(reactant))
                if re.search(r'^[a-zA-Z0-9_]*$',str(product)) is not None:
                    product_string = str(product)
                else:
                    product_string = '"{0}"'.format(str(product))
                edge = graph.get_edge(reactant_string, product_string)[0]
                # Determine direction of arrow based on sign of rate
                speciesRate = speciesRates[t,reactantIndex,productIndex] / maxSpeciesRate
                if speciesRate < 0:
                    edge.set_dir("back")
                    speciesRate = -speciesRate
                else:
                    edge.set_dir("forward")
                # Set the edge pen width
                if speciesRate < speciesRateTolerance:
                    penwidth = 0.0
                    edge.set_dir("none")
                else:
                    penwidth = round(slope * math.log10(speciesRate) + maximumEdgePenWidth,3)
                edge.set_penwidth(penwidth)
        # Save the graph at this time to a dot file and a PNG image
        if times[t] == 0:
            label = 't = 0 s'
        else:
            label = 't = 10^{0:.1f} s'.format(math.log10(times[t]))
        graph.set_label(label)
        # Hold the first and last frames longer so the movie is readable
        if t == 0:
            repeat = framesPerSecond * initialPadding
        elif t == len(times) - 1:
            repeat = framesPerSecond * finalPadding
        else:
            repeat = 1
        for r in range(repeat):
            graph.write_dot(os.path.join(outputDirectory, 'flux_diagram_{0:04d}.dot'.format(frameNumber)))
            graph.write_png(os.path.join(outputDirectory, 'flux_diagram_{0:04d}.png'.format(frameNumber)))
            frameNumber += 1

    # Use ffmpeg to stitch the PNG images together into a movie
    import subprocess
    command = ['ffmpeg',
               '-framerate', '{0:d}'.format(framesPerSecond), # Duration of each image
               '-i', 'flux_diagram_%04d.png',                 # Input file format
               '-c:v', 'mpeg4',                               # Encoder
               '-r', '30',                                    # Video framerate
               '-pix_fmt', 'yuv420p',                         # Pixel format
               'flux_diagram.avi']                            # Output filename
    subprocess.check_call(command, cwd=outputDirectory)
################################################################################
def simulate(reactionModel, reactionSystem, settings=None):
    """
    Generate and return a set of core and edge species and reaction fluxes
    by simulating the given `reactionSystem` using the given `reactionModel`.

    Returns (time, coreSpeciesConcentrations, coreReactionRates,
    edgeReactionRates) as numpy arrays with one row per saved time point.
    NOTE: when `settings` is given, the module-level `timeStep` global is
    overwritten for the remainder of the process.
    """
    global timeStep
    # Allow user defined settings for flux diagram generation if given
    if settings:
        timeStep = settings.get('timeStep', timeStep)

    coreSpecies = reactionModel.core.species
    coreReactions = reactionModel.core.reactions
    edgeSpecies = reactionModel.edge.species
    edgeReactions = reactionModel.edge.reactions

    # Map each core species to its index, for conversion lookups below
    speciesIndex = {}
    for index, spec in enumerate(coreSpecies):
        speciesIndex[spec] = index

    simulatorSettings = SimulatorSettings(atol=absoluteTolerance,rtol=relativeTolerance)

    # Enable constant species for LiquidReactor
    if isinstance(reactionSystem, LiquidReactor):
        if reactionSystem.constSPCNames is not None:
            reactionSystem.get_constSPCIndices(coreSpecies)

    reactionSystem.initializeModel(coreSpecies, coreReactions, edgeSpecies, edgeReactions,
                                   atol=simulatorSettings.atol, rtol=simulatorSettings.rtol,
                                   sens_atol=simulatorSettings.sens_atol, sens_rtol=simulatorSettings.sens_rtol)

    # Copy the initial conditions to use in evaluating conversions
    y0 = reactionSystem.y.copy()

    time = []
    coreSpeciesConcentrations = []
    coreReactionRates = []
    edgeReactionRates = []

    nextTime = initialTime
    terminated = False
    while not terminated:
        # Integrate forward in time to the next time point
        reactionSystem.advance(nextTime)

        time.append(reactionSystem.t)
        coreSpeciesConcentrations.append(reactionSystem.coreSpeciesConcentrations)
        coreReactionRates.append(reactionSystem.coreReactionRates)
        edgeReactionRates.append(reactionSystem.edgeReactionRates)

        # Finish simulation if any of the termination criteria are satisfied
        for term in reactionSystem.termination:
            if isinstance(term, TerminationTime):
                if reactionSystem.t > term.time.value_si:
                    terminated = True
                    break
            elif isinstance(term, TerminationConversion):
                index = speciesIndex[term.species]
                # Fractional conversion of the target species relative to t=0
                if (y0[index] - reactionSystem.y[index]) / y0[index] > term.conversion:
                    terminated = True
                    break

        # Increment destination step time if necessary
        # (0.9999 factor: the solver may stop marginally short of nextTime)
        if reactionSystem.t >= 0.9999 * nextTime:
            nextTime *= timeStep

    time = numpy.array(time)
    coreSpeciesConcentrations = numpy.array(coreSpeciesConcentrations)
    coreReactionRates = numpy.array(coreReactionRates)
    edgeReactionRates = numpy.array(edgeReactionRates)

    return time, coreSpeciesConcentrations, coreReactionRates, edgeReactionRates
################################################################################
def loadChemkinOutput(outputFile, reactionModel):
    """
    Load the species concentrations from a Chemkin Output file in a simulation
    and generate the reaction rates at each time point.

    Returns (time, coreSpeciesConcentrations, coreReactionRates,
    edgeReactionRates) as numpy arrays.  Parsing relies on the fixed layout
    of the Chemkin output (' TIME ', ' PRESSURE ', ' TEMPERATURE ' and
    ' MOLE FRACTIONS ' sections) and stops at the 'SPECIFIED END' marker.
    """
    import rmgpy.constants as constants
    from rmgpy.quantity import Quantity

    coreReactions = reactionModel.core.reactions
    edgeReactions = reactionModel.edge.reactions
    speciesList = reactionModel.core.species

    time = []
    coreSpeciesConcentrations = []
    coreReactionRates = []
    edgeReactionRates = []

    with open(outputFile, 'r') as f:
        line = f.readline()
        while line != '' and 'SPECIFIED END' not in line:
            # NOTE(review): the return value of strip() is discarded, so this
            # is a no-op; the substring tests below match the raw line
            # (including its leading spaces).
            line.strip()
            tokens = line.split()
            if ' TIME ' in line:
                # Time is in seconds
                time.append(float(tokens[-2]))
            elif ' PRESSURE ' in line:
                # Pressure from Chemkin is in atm
                P = Quantity(float(tokens[-2]),'atm')
            elif ' TEMPERATURE ' in line:
                # Temperature from Chemkin in in K
                T = Quantity(float(tokens[-2]),'K')
            elif ' MOLE FRACTIONS ' in line:
                # Species always come in the same order as listed in chem.inp
                molefractions = []
                line = f.readline() # This one reads the blank line which follows
                line = f.readline()
                while line.strip() != '':
                    tokens = line.split()
                    # Every third token (columns 2, 5, 8, ...) is a value
                    for value in tokens[2::3]:
                        # Make all concentrations positive
                        if value.find('-') == 0:
                            value = value.replace('-','',1)
                        # Sometimes chemkin removes the `E` in scientific notation due to lack of space,
                        # rendering invalid float values. If this is the case, add it in.
                        if value.find('-') != -1:
                            if value.find('E') == -1:
                                value = value.replace('-','E-')
                        molefractions.append(float(value))
                    line = f.readline()

                # Ideal-gas total concentration: n/V = P/(R*T)
                totalConcentration = P.value_si/constants.R/T.value_si
                coreSpeciesConcentrations.append([molefrac*totalConcentration for molefrac in molefractions])
                coreRates = []
                edgeRates = []
                for reaction in coreReactions:
                    rate = reaction.getRateCoefficient(T.value_si,P.value_si)
                    for reactant in reaction.reactants:
                        rate *= molefractions[speciesList.index(reactant)]*totalConcentration
                    coreRates.append(rate)

                for reaction in edgeReactions:
                    # NOTE(review): edge rates are just rate coefficients here
                    # (no concentration factors) -- confirm this is intended.
                    edgeRates.append(reaction.getRateCoefficient(T.value_si,P.value_si))

                if coreRates:
                    coreReactionRates.append(coreRates)
                if edgeRates:
                    edgeReactionRates.append(edgeRates)

            line=f.readline()

    time = numpy.array(time)
    coreSpeciesConcentrations = numpy.array(coreSpeciesConcentrations)
    coreReactionRates = numpy.array(coreReactionRates)
    edgeReactionRates = numpy.array(edgeReactionRates)

    return time, coreSpeciesConcentrations, coreReactionRates, edgeReactionRates
################################################################################
def createFluxDiagram(inputFile, chemkinFile, speciesDict, savePath=None, speciesPath=None, java=False, settings=None,
                      chemkinOutput='', centralSpecies=None, diffusionLimited=True):
    """
    Generates the flux diagram based on a condition 'inputFile', chemkin.inp chemkinFile,
    a speciesDict txt file, plus an optional chemkinOutput file.

    When `chemkinOutput` is given, concentrations/rates are parsed from it
    (a single reaction system); otherwise each reaction system in the RMG
    job is simulated with simulate().  Output goes under `savePath`
    (default: <rmg.outputDirectory>/flux/<system index>/).
    """
    if speciesPath is None:
        speciesPath = os.path.join(os.path.dirname(inputFile), 'species')
        # Only generate species images when using the default location
        generateImages = True
    else:
        generateImages = False

    rmg = loadRMGJob(inputFile, chemkinFile, speciesDict, generateImages=generateImages, useJava=java)

    if savePath is None:
        savePath = os.path.join(rmg.outputDirectory, 'flux')

    # if you have a chemkin output, then you only have one reactionSystem
    if chemkinOutput:
        outDir = os.path.join(savePath, '1')
        try:
            os.makedirs(outDir)
        except OSError:
            # Directory already exists
            pass

        print 'Extracting species concentrations and calculating reaction rates from chemkin output...'
        time, coreSpeciesConcentrations, coreReactionRates, edgeReactionRates = loadChemkinOutput(chemkinOutput, rmg.reactionModel)

        print 'Generating flux diagram for chemkin output...'
        generateFluxDiagram(rmg.reactionModel, time, coreSpeciesConcentrations, coreReactionRates, outDir, centralSpecies, speciesPath, settings)

    else:
        # Generate a flux diagram video for each reaction system
        for index, reactionSystem in enumerate(rmg.reactionSystems):
            outDir = os.path.join(savePath, '{0:d}'.format(index+1))
            try:
                os.makedirs(outDir)
            except OSError:
                # Fail silently on any OS errors
                pass

            # Enable diffusion-limited rates
            if diffusionLimited and isinstance(reactionSystem, LiquidReactor):
                rmg.loadDatabase()
                solventData = rmg.database.solvation.getSolventData(rmg.solvent)
                diffusionLimiter.enable(solventData, rmg.database.solvation)

            # If there is no termination time, then add one to prevent jobs from
            # running forever
            if not any([isinstance(term, TerminationTime) for term in reactionSystem.termination]):
                reactionSystem.termination.append(TerminationTime((1e10,'s')))

            print 'Conducting simulation of reaction system {0:d}...'.format(index+1)
            time, coreSpeciesConcentrations, coreReactionRates, edgeReactionRates = simulate(rmg.reactionModel, reactionSystem, settings)

            print 'Generating flux diagram for reaction system {0:d}...'.format(index+1)
            generateFluxDiagram(rmg.reactionModel, time, coreSpeciesConcentrations, coreReactionRates, outDir,
                                centralSpecies, speciesPath, settings)
|
|
from basic import Basic, S
from operations import AssocOp
from cache import cacheit
from logic import fuzzy_not
from numbers import Integer, Rational
from symbol import Symbol
# internal marker to indicate:
# "there are still non-commutative objects -- don't forget to process them"
class NC_Marker:
    """Sentinel pushed onto the work queue by Mul.flatten to signal that
    scheduled non-commutative objects still await processing.  The class
    itself is used as the marker (it is never instantiated); the flags
    below make it safely skippable by flatten's type checks."""
    is_Order = False
    is_Mul = False
    is_Number = False
    is_commutative = False
class Mul(AssocOp):
__slots__ = []
is_Mul = True
    @classmethod
    def flatten(cls, seq):
        """Canonicalize a list of factors into (c_part, nc_part, order_symbols).

        Applies associativity, separates commutative from non-commutative
        factors, folds numbers into a single leading coefficient and combines
        powers with equal bases.  `seq` is consumed as a work queue and may be
        extended while iterating.
        """
        # apply associativity, separate commutative part of seq
        c_part = []         # out: commutative factors
        nc_part = []        # out: non-commutative factors

        nc_seq = []

        coeff = S.One       # standalone term
                            # e.g. 3 * ...

        c_powers = []       # (base,exp)      n
                            # e.g. (x,n) for x

        num_exp = []        # (num-base, exp)           y
                            # e.g.  (3, y)  for  ... * 3  * ...

        order_symbols = None

        # --- PART 1 ---
        #
        # "collect powers and coeff":
        #
        # o coeff
        # o c_powers
        # o num_exp
        #
        # NOTE: this is optimized for all-objects-are-commutative case

        for o in seq:
            # O(x)
            if o.is_Order:
                o, order_symbols = o.as_expr_symbols(order_symbols)

            # Mul([...])
            if o.is_Mul:
                if o.is_commutative:
                    seq.extend(o.args)    # XXX zerocopy?
                else:
                    # NCMul can have commutative parts as well
                    for q in o.args:
                        if q.is_commutative:
                            seq.append(q)
                        else:
                            nc_seq.append(q)

                    # append non-commutative marker, so we don't forget to
                    # process scheduled non-commutative objects
                    seq.append(NC_Marker)

                continue

            # 3
            elif o.is_Number:
                coeff *= o
                continue

            elif o.is_commutative:
                #      e
                # o = b
                b, e = o.as_base_exp()

                #  y
                # 3
                if o.is_Pow and b.is_Number:
                    # get all the factors with numeric base so they can be
                    # combined below
                    num_exp.append((b,e))
                    continue

                #         n          n          n
                # (-3 + y)   ->  (-1)  * (3 - y)
                if b.is_Add and e.is_Number:
                    #found factor (x+y)**number; split off initial coefficient
                    c, t = b.as_coeff_terms()

                    #last time I checked, Add.as_coeff_terms returns One or NegativeOne
                    #but this might change
                    if c.is_negative and not e.is_integer:
                        # extracting root from negative number: ignore sign
                        if c is not S.NegativeOne:
                            # make c positive (probably never occurs)
                            coeff *= (-c) ** e
                            assert len(t)==1,`t`
                            b = -t[0]
                        #else: ignoring sign from NegativeOne: nothing to do!
                    elif c is not S.One:
                        coeff *= c ** e
                        assert len(t)==1,`t`
                        b = t[0]
                    #else: c is One, so pass

                c_powers.append((b,e))

            # NON-COMMUTATIVE
            # TODO: Make non-commutative exponents not combine automatically
            else:
                if o is not NC_Marker:
                    nc_seq.append(o)

                # process nc_seq (if any)
                while nc_seq:
                    o = nc_seq.pop(0)
                    if not nc_part:
                        nc_part.append(o)
                        continue

                    #                             b    c       b+c
                    # try to combine last terms: a  * a   ->  a
                    o1 = nc_part.pop()
                    b1,e1 = o1.as_base_exp()
                    b2,e2 = o.as_base_exp()
                    if b1==b2:
                        o12 = b1 ** (e1 + e2)

                        # now o12 could be a commutative object
                        if o12.is_commutative:
                            seq.append(o12)
                            continue
                        else:
                            nc_seq.insert(0, o12)
                    else:
                        nc_part.append(o1)
                        nc_part.append(o)

        # We do want a combined exponent if it would not be an Add, such as
        #  y    2y     3y
        # x  * x   -> x
        # We determine this if two exponents have the same term in as_coeff_terms
        #
        # Unfortunately, this isn't smart enough to consider combining into
        # exponents that might already be adds, so thing like:
        #   z - y    y
        #  x      * x  will be left alone. This is because checking every possible
        # combination can slow things down.
        new_c_powers = []
        common_b = {} # b:e
        # First gather exponents of common bases
        for b, e in c_powers:
            co = e.as_coeff_terms()
            if b in common_b:
                if co[1] in common_b[b]:
                    common_b[b][co[1]] += co[0]
                else:
                    common_b[b][co[1]] = co[0]
            else:
                common_b[b] = {co[1]:co[0]}

        for b,e, in common_b.items():
            for t, c in e.items():
                new_c_powers.append((b,c*Mul(*t)))
        c_powers = new_c_powers

        # And the same for numeric bases
        new_num_exp = []
        common_b = {} # b:e
        for b, e in num_exp:
            co = e.as_coeff_terms()
            if b in common_b:
                if co[1] in common_b[b]:
                    common_b[b][co[1]] += co[0]
                else:
                    common_b[b][co[1]] = co[0]
            else:
                common_b[b] = {co[1]:co[0]}

        for b,e, in common_b.items():
            for t, c in e.items():
                new_num_exp.append((b,c*Mul(*t)))
        num_exp = new_num_exp

        # --- PART 2 ---
        #
        # o process collected powers  (x**0 -> 1; x**1 -> x; otherwise Pow)
        # o combine collected powers  (2**x * 3**x -> 6**x)
        #   with numeric base

        # ................................
        # now we have:
        # - coeff:
        # - c_powers:    (b, e)
        # - num_exp:     (2, e)

        #  0             1
        # x  -> 1       x  -> x
        # NOTE(review): Pow, Real and Add are not imported at the top of this
        # module view -- presumably bound into the module elsewhere; confirm.
        for b, e in c_powers:
            if e is S.Zero:
                continue

            if e is S.One:
                if b.is_Number:
                    coeff *= b
                else:
                    c_part.append(b)
            elif e.is_Integer and b.is_Number:
                coeff *= b ** e
            else:
                c_part.append(Pow(b, e))

        #  x    x     x
        # 2  * 3  -> 6
        inv_exp_dict = {}   # exp:Mul(num-bases)     x    x
                            # e.g.  x:6  for  ... * 2  * 3  * ...
        for b,e in num_exp:
            if e in inv_exp_dict:
                inv_exp_dict[e] *= b
            else:
                inv_exp_dict[e] = b

        for e,b in inv_exp_dict.items():
            if e is S.Zero:
                continue

            if e is S.One:
                if b.is_Number:
                    coeff *= b
                else:
                    c_part.append(b)
            elif e.is_Integer and b.is_Number:
                coeff *= b ** e
            else:
                obj = b**e
                if obj.is_Number:
                    coeff *= obj
                else:
                    c_part.append(obj)

        # oo, -oo
        # An infinite coefficient absorbs the signs of all signed factors.
        if (coeff is S.Infinity) or (coeff is S.NegativeInfinity):
            new_c_part = []
            for t in c_part:
                if t.is_positive:
                    continue
                if t.is_negative:
                    coeff = -coeff
                    continue
                new_c_part.append(t)
            c_part = new_c_part
            new_nc_part = []
            for t in nc_part:
                if t.is_positive:
                    continue
                if t.is_negative:
                    coeff = -coeff
                    continue
                new_nc_part.append(t)
            nc_part = new_nc_part

        # 0, nan
        elif (coeff is S.Zero) or (coeff is S.NaN):
            # we know for sure the result will be the same as coeff (0 or nan)
            return [coeff], [], order_symbols

        elif coeff.is_Real:
            if coeff == Real(0):
                c_part, nc_part = [coeff], []
            elif coeff == Real(1):
                # change it to One, so it doesn't get inserted to slot0
                coeff = S.One

        # order commutative part canonically
        c_part.sort(Basic.compare)

        # current code expects coeff to be always in slot-0
        if coeff is not S.One:
            c_part.insert(0, coeff)

        # we are done
        if len(c_part)==2 and c_part[0].is_Number and c_part[1].is_Add:
            # 2*(1+a) -> 2 + 2 * a
            coeff = c_part[0]
            c_part = [Add(*[coeff*f for f in c_part[1].args])]

        return c_part, nc_part, order_symbols
    def _eval_power(b, e):
        """Distribute an exponent over this product where it is valid.

        NOTE: old-style sympy method -- `b` plays the role of `self`
        (the Mul base being raised to `e`).  Returns None when no
        simplification applies.
        """
        if e.is_Number:
            if b.is_commutative:
                if e.is_Integer:
                    # (a*b)**2 -> a**2 * b**2
                    return Mul(*[s**e for s in b.args])

                if e.is_rational:
                    coeff, rest = b.as_coeff_terms()
                    if coeff == -1:
                        # do not pull -1 out from under a fractional power
                        return None
                    elif coeff < 0:
                        # split off the sign so the root of the positive part
                        # can be taken separately
                        return (-coeff)**e * Mul(*((S.NegativeOne,) +rest))**e
                    else:
                        return coeff**e * Mul(*[s**e for s in rest])

                coeff, rest = b.as_coeff_terms()
                if coeff is not S.One:
                    # (2*a)**3 -> 2**3 * a**3
                    return coeff**e * Mul(*[s**e for s in rest])

            elif e.is_Integer:
                # non-commutative base with integer exponent: a negative
                # exponent inverts, so the factor order must be reversed
                coeff, rest = b.as_coeff_terms()
                l = [s**e for s in rest]
                if e.is_negative:
                    l.reverse()
                return coeff**e * Mul(*l)

        c,t = b.as_coeff_terms()
        if e.is_even and c.is_Number and c < 0:
            # even power absorbs a negative numeric coefficient's sign
            return (-c * Mul(*t)) ** e

        #if e.atoms(Wild):
        #    return Mul(*[t**e for t in b])
def _eval_evalf(self, prec):
return AssocOp._eval_evalf(self, prec).expand()
@cacheit
def as_two_terms(self):
args = self.args
if len(args) == 1:
return S.One, self
elif len(args) == 2:
return args
else:
return args[0], self._new_rawargs(*args[1:])
@cacheit
def as_coeff_terms(self, x=None):
if x is not None:
l1 = []
l2 = []
for f in self.args:
if f.has(x):
l2.append(f)
else:
l1.append(f)
return Mul(*l1), tuple(l2)
coeff = self.args[0]
if coeff.is_Number:
return coeff, self.args[1:]
return S.One, self.args
@staticmethod
def _expandsums(sums):
"""
Helper function for _eval_expand_mul.
sums must be a list of instances of Basic.
"""
L = len(sums)
if L == 1:
return sums[0].args
terms = []
left = Mul._expandsums(sums[:L//2])
right = Mul._expandsums(sums[L//2:])
terms = [Mul(a, b) for a in left for b in right]
added = Add(*terms)
if added.is_Add:
terms = list(added.args)
else:
terms = [added]
return terms
def _eval_expand_basic(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_exp(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_power_exp'):
newterm = term._eval_expand_power_exp(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_base(self, deep=True, **hints):
sargs, terms = self.args[:], []
for term in sargs:
if hasattr(term, '_eval_expand_power_base'):
newterm = term._eval_expand_power_base(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
    def _eval_expand_mul(self, deep=True, **hints):
        """Distribute this product over any Add factors (a*(b+c) -> a*b+a*c)."""
        plain, sums, rewrite = [], [], False
        for factor in self.args:
            if deep:
                # Expand the factor itself first; the is_Add test below must
                # see the expanded form.
                term = factor.expand(deep=deep, **hints)
                if term != factor:
                    factor = term
                    rewrite = True
            if factor.is_Add:
                sums.append(factor)
                rewrite = True
            else:
                if factor.is_commutative:
                    plain.append(factor)
                else:
                    # NOTE(review): wrapping a non-commutative factor in a
                    # bare Basic apparently keeps _expandsums from merging
                    # it out of order -- confirm this is the intent.
                    Wrapper = Basic
                    sums.append(Wrapper(factor))
        if not rewrite:
            # nothing changed and no Adds present: return self unchanged
            return self
        else:
            if sums:
                terms = Mul._expandsums(sums)
                plain = Mul(*plain)
                return Add(*(Mul(plain, term) for term in terms), **self.assumptions0)
            else:
                return Mul(*plain, **self.assumptions0)
def _eval_expand_multinomial(self, deep=True, **hints):
    """Apply _eval_expand_multinomial to each factor and rebuild the product."""
    expanded = []
    for factor in self.args:
        hook = getattr(factor, '_eval_expand_multinomial', None)
        expanded.append(factor if hook is None else hook(deep=deep, **hints))
    return self.new(*expanded)
def _eval_expand_log(self, deep=True, **hints):
    """Apply _eval_expand_log to each factor and rebuild the product."""
    expanded = []
    for factor in self.args:
        hook = getattr(factor, '_eval_expand_log', None)
        expanded.append(factor if hook is None else hook(deep=deep, **hints))
    return self.new(*expanded)
def _eval_expand_complex(self, deep=True, **hints):
    """Apply _eval_expand_complex to each factor and rebuild the product."""
    expanded = []
    for factor in self.args:
        hook = getattr(factor, '_eval_expand_complex', None)
        expanded.append(factor if hook is None else hook(deep=deep, **hints))
    return self.new(*expanded)
def _eval_expand_trig(self, deep=True, **hints):
    """Apply _eval_expand_trig to each factor and rebuild the product."""
    expanded = []
    for factor in self.args:
        hook = getattr(factor, '_eval_expand_trig', None)
        expanded.append(factor if hook is None else hook(deep=deep, **hints))
    return self.new(*expanded)
def _eval_expand_func(self, deep=True, **hints):
    """Apply _eval_expand_func to each factor and rebuild the product."""
    expanded = []
    for factor in self.args:
        hook = getattr(factor, '_eval_expand_func', None)
        expanded.append(factor if hook is None else hook(deep=deep, **hints))
    return self.new(*expanded)
def _eval_derivative(self, s):
    """Product rule: d(f1*...*fn)/ds = sum over i of f1*...*fi'*...*fn."""
    factors = list(self.args)
    summands = []
    for index, factor in enumerate(factors):
        derivative = factor.diff(s)
        if derivative is S.Zero:
            # a vanishing derivative contributes nothing to the sum
            continue
        summands.append(Mul(*(factors[:index] + [derivative] + factors[index + 1:])))
    return Add(*summands)
def _matches_simple(pattern, expr, repl_dict):
    # handle (w*3).matches('x*5') -> {w: x*5/3}: strip the numeric
    # coefficient and match the single remaining factor against expr/coeff.
    coeff, factors = pattern.as_coeff_terms()
    if len(factors) != 1:
        return None
    return factors[0].matches(expr / coeff, repl_dict)
def matches(pattern, expr, repl_dict=None, evaluate=False):
    """Match ``expr`` against ``pattern``, returning a substitution dict
    or None.

    Delegates to AssocOp._matches_commutative for fully commutative
    products, else falls back to the generic Basic.matches.

    Bug fix: the previous signature used the mutable default
    ``repl_dict={}``, a single dict shared across every call that omits
    the argument; a fresh dict is now created per call.
    """
    if repl_dict is None:
        repl_dict = {}
    expr = sympify(expr)
    if pattern.is_commutative and expr.is_commutative:
        return AssocOp._matches_commutative(pattern, expr, repl_dict, evaluate)
    # todo for commutative parts, until then use the default matches method for non-commutative products
    return Basic.matches(pattern, expr, repl_dict, evaluate)
@staticmethod
def _combine_inverse(lhs, rhs):
    """
    Return lhs/rhs, treating the arguments symbolically so that e.g.
    oo/oo gives 1 instead of nan.
    """
    if lhs == rhs:
        return S.One
    if lhs.is_Mul and rhs.is_Mul:
        numerator_factors = list(lhs.args[:])
        # factors of rhs not cancelled against lhs end up in the divisor
        leftover = [1]
        for factor in rhs.args:
            if factor in numerator_factors:
                numerator_factors.remove(factor)
            else:
                leftover.append(factor)
        return Mul(*numerator_factors) / Mul(*leftover)
    return lhs / rhs
def as_powers_dict(self):
    """Map each factor's base to its exponent (later duplicates win)."""
    return dict(factor.as_base_exp() for factor in self)
def as_numer_denom(self):
    """Return (numerator, denominator), each a Mul of the factors' parts."""
    split = [factor.as_numer_denom() for factor in self.args]
    return Mul(*[n for n, _ in split]), Mul(*[d for _, d in split])
@cacheit
def count_ops(self, symbolic=True):
    """Sum the factors' op counts plus one MUL per extra factor."""
    mul_cost = Symbol('MUL') if symbolic else 1
    return Add(*[t.count_ops(symbolic) for t in self.args]) + \
        mul_cost * (len(self.args) - 1)
def _eval_is_polynomial(self, syms):
    """A product is polynomial in ``syms`` iff every factor is."""
    return all(factor._eval_is_polynomial(syms) for factor in self.args)
# Delegate these assumption queries to the shared template helper.
def _eval_is_bounded(self):
    return self._eval_template_is_attr('is_bounded')

def _eval_is_commutative(self):
    return self._eval_template_is_attr('is_commutative')

def _eval_is_integer(self):
    return self._eval_template_is_attr('is_integer')

def _eval_is_comparable(self):
    return self._eval_template_is_attr('is_comparable')
# I*I -> R, I*I*I -> -I
def _eval_is_real(self):
    """Fuzzy check whether the product is real.

    Counts imaginary factors: an even number of them (i*i = -1) keeps
    the product real, provided every other factor is known real.
    Returns True/False/None in the usual three-valued assumption style.
    """
    im_count = 0
    re_not = False  # saw a factor known NOT to be real (and not imaginary)
    for t in self.args:
        if t.is_imaginary:
            im_count += 1
            continue
        t_real = t.is_real
        if t_real:
            continue
        elif fuzzy_not(t_real):
            # definitely non-real, non-imaginary factor
            re_not = True
        else:
            # unknown factor -> the whole question is undecidable
            return None
    if re_not:
        return False
    return (im_count % 2 == 0)
def _eval_is_imaginary(self):
    """Fuzzy check whether the product is purely imaginary.

    A product of known-real and known-imaginary factors is imaginary
    exactly when the imaginary-factor count is odd; any factor that is
    neither makes the result unknown (None).
    """
    im_count = 0
    for t in self.args:
        if t.is_imaginary:
            im_count += 1
        elif t.is_real:
            continue
        # real=F|U: neither known imaginary nor known real -> undecidable
        else:
            return None
    return (im_count % 2 == 1)
def _eval_is_irrational(self):
    """True on the first known-irrational factor; None on the first unknown."""
    for factor in self.args:
        irrational = factor.is_irrational
        if irrational:
            return True
        if irrational is None:
            return None
    return False
def _eval_is_positive(self):
    """Fuzzy positivity check for a product.

    Known-positive factors cannot change the sign and are stripped; the
    decision is then made from the first remaining factor ``c`` and the
    product ``r`` of the rest, returning True/False/None.

    Bug fix: the original tested ``c.is_negative and r.is_negative``
    twice in a row with the operands swapped (identical condition); the
    redundant copy has been removed.
    """
    terms = [t for t in self.args if not t.is_positive]
    if not terms:
        # every factor is known positive
        return True
    c = terms[0]
    if len(terms) == 1:
        if c.is_nonpositive:
            return False
        return None
    r = Mul(*terms[1:])
    # negative * negative -> positive
    if c.is_negative and r.is_negative:
        return True
    # check for nonpositivity, <=0
    if c.is_negative and r.is_nonnegative:
        return False
    if r.is_negative and c.is_nonnegative:
        return False
    if c.is_nonnegative and r.is_nonpositive:
        return False
    if r.is_nonnegative and c.is_nonpositive:
        return False
    return None
def _eval_is_negative(self):
    """Fuzzy negativity check for a product, mirroring _eval_is_positive.

    Known-positive factors are stripped first; the remaining first
    factor ``c`` and the product ``r`` of the rest decide the answer.
    Falls through to None when undecidable.
    """
    terms = [t for t in self.args if not t.is_positive]
    if not terms:
        # all terms are either positive -- 2*Symbol('n', positive=T)
        # or unknown -- 2*Symbol('x')
        if self.is_positive:
            return False
        else:
            return None
    c = terms[0]
    if len(terms) == 1:
        # a single possibly-nonpositive factor decides the sign alone
        return c.is_negative
    r = Mul(*terms[1:])
    # check for nonnegativity, >=0
    if c.is_negative and r.is_nonpositive:
        return False
    if r.is_negative and c.is_nonpositive:
        return False
    if c.is_nonpositive and r.is_nonpositive:
        return False
    if c.is_nonnegative and r.is_nonnegative:
        return False
def _eval_is_odd(self):
    """An integer product is odd iff no factor is even (fuzzy).

    Returns False on any even factor, None if some factor's oddness is
    unknown, True otherwise; non-integers are never odd.
    """
    is_integer = self.is_integer
    if is_integer:
        r = True
        for t in self.args:
            if t.is_even:
                # one even factor makes the whole product even
                return False
            if t.is_odd is None:
                # undecided factor downgrades the answer to None
                r = None
        return r
    # !integer -> !odd
    elif is_integer == False:
        return False
def _eval_is_even(self):
    """Even iff integer and not odd, via three-valued fuzzy_not."""
    integrality = self.is_integer
    if integrality:
        return fuzzy_not(self._eval_is_odd())
    if integrality == False:
        return False
    return None
def _eval_subs(self, old, new):
    """Substitute ``old`` with ``new`` inside this product.

    Implements "strict" substitution: the coefficients of self and old
    must agree (unless old's coefficient is 1), integer powers are
    unrolled into repeated factors, and the commutative and
    non-commutative parts are matched separately (non-commutative
    factors must match contiguously and in order).

    Bug fix: when unrolling a negative integer power of a factor of
    ``old``, the original appended ``1/s.base`` -- but ``s`` is not
    bound in that loop (it is the loop variable of the *next* loop), so
    this either raised NameError or silently used a stale value. It now
    correctly appends ``1/o.base``.
    """
    # base cases
    # simpliest
    if self == old:
        return new
    # pass it off to its own class
    if isinstance(old, FunctionClass):
        return self.__class__(*[s._eval_subs(old, new) for s in self.args ])
    # break up self and old into terms
    coeff_self,terms_self = self.as_coeff_terms()
    coeff_old,terms_old = old.as_coeff_terms()
    # NEW - implementation of strict substitution
    # if the coefficients are not the same, do not substitute.
    # the only exception is if old has a coefficient of 1, then always to the sub.
    if coeff_self != coeff_old and coeff_old != 1:
        return self.__class__(*[s._eval_subs(old, new) for s in self.args])
    # break up powers, i.e., x**2 -> x*x
    otemp, stemp = [], []
    for o in terms_old:
        if isinstance(o,Pow) and isinstance(o.exp, Integer):
            if o.exp.is_positive:
                for i in range(o.exp): otemp.append(o.base)
            elif o.exp.is_negative:
                # was 1/s.base -- 's' is unbound here; unroll 'o' instead
                for i in range(abs(o.exp)): otemp.append(1/o.base)
        else: otemp.append(o)
    for s in terms_self:
        if isinstance(s,Pow) and isinstance(s.exp, Integer):
            if s.exp.is_positive:
                for i in range(s.exp): stemp.append(s.base)
            elif s.exp.is_negative:
                for i in range(abs(s.exp)): stemp.append(1/s.base)
        else: stemp.append(s)
    terms_old = otemp
    terms_self = stemp
    # break up old and self terms into commutative and noncommutative lists
    comm_old = []; noncomm_old = []
    comm_self = []; noncomm_self = []
    for o in terms_old:
        if o.is_commutative:
            comm_old.append(o)
        else:
            noncomm_old.append(o)
    for s in terms_self:
        if s.is_commutative:
            comm_self.append(s)
        else:
            noncomm_self.append(s)
    comm_old_len, noncomm_old_len = len(comm_old), len(noncomm_old)
    comm_self_len, noncomm_self_len = len(comm_self), len(noncomm_self)
    # if the noncommutative part of the 'to-be-replaced' expression is smaller
    # than the noncommutative part of the whole expression, scan to see if the
    # whole thing is there
    if noncomm_old_len <= noncomm_self_len and noncomm_old_len > 0:
        for i in range(noncomm_self_len):
            if noncomm_self[i] == noncomm_old[0]:
                for j in range(noncomm_old_len):
                    # make sure each noncommutative term matches in order
                    if (i+j) < noncomm_self_len and noncomm_self[i+j] == noncomm_old[j]:
                        # we only care once we've reached the end of old's noncommutative part.
                        if j == noncomm_old_len-1:
                            # get rid of noncommutative terms and substitute new expression into total expression
                            noncomms_final = noncomm_self[:i]+noncomm_self[i+j+1:]
                            noncomms_final.insert(i,new)
                            myFlag = True
                            comms_final = comm_self[:]
                            # check commutative terms
                            for ele in comm_old:
                                # flag to make sure all the commutative terms in old are in self
                                if ele not in comm_self:
                                    myFlag = False
                                # collect commutative terms
                                else:
                                    comms_final.remove(ele)
                            # continue only if all commutative terms in old are present
                            if myFlag == True:
                                expr = comms_final+noncomms_final
                                return Mul(coeff_self/coeff_old, Mul(*expr)._eval_subs(old,new))#*[e._eval_subs(old,new) for e in expr])
        return self.__class__(*[s._eval_subs(old, new) for s in self.args])
    # but what if the noncommutative lists subexpression and the whole expression are both empty
    elif noncomm_old_len == noncomm_self_len == 0:
        # just check commutative parts then.
        if comm_old_len > 0 and comm_old_len<=comm_self_len:
            if comm_self == comm_old:
                return Mul(coeff_self/coeff_old*new)
            myFlag = True
            comms_final = comm_self[:]
            # check commutative terms
            for ele in comm_old:
                # flag to make sure all the commutative terms in old are in self
                if ele not in comm_self:
                    myFlag = False
                # collect commutative terms
                else:
                    # needed if old has an element to an integer power
                    if ele in comms_final:
                        comms_final.remove(ele)
                    else:
                        myFlag = False
            # continue only if all commutative terms in old are present
            if myFlag == True:
                return Mul(coeff_self/coeff_old, new, Mul(*comms_final)._eval_subs(old,new))#*[c._eval_subs(old,new) for c in comms_final])
            else:
                return self.__class__(*[s._eval_subs(old, new) for s in self.args])
    # else the subexpression isn't in the totaly expression
    return self.__class__(*[s._eval_subs(old, new) for s in self.args])
def _eval_nseries(self, x, x0, n):
    """Series-expand every factor, multiply out, then recombine exponents."""
    from sympy import powsimp
    expansions = [factor.nseries(x, x0, n) for factor in self.args]
    return powsimp(Mul(*expansions).expand(), combine='exp', deep=True)
def _eval_as_leading_term(self, x):
    """Leading term of a product is the product of the leading terms."""
    leading = [factor.as_leading_term(x) for factor in self.args]
    return Mul(*leading)
def _eval_conjugate(self):
    """Conjugate of a product is the product of the conjugates."""
    conjugated = [factor.conjugate() for factor in self.args]
    return Mul(*conjugated)
def _sage_(self):
    """Convert to a Sage expression by multiplying the converted factors."""
    product = 1
    for factor in self.args:
        product = product * factor._sage_()
    return product
from power import Pow
from numbers import Real
from function import FunctionClass
from sympify import sympify
from add import Add
|
|
""" Testing utility functions
"""
import numpy as np
import random
from dipy.core.geometry import (sphere2cart, cart2sphere,
nearest_pos_semi_def,
sphere_distance,
cart_distance,
vector_cosine,
lambert_equal_area_projection_polar,
circumradius,
vec2vec_rotmat,
vector_norm,
compose_transformations,
compose_matrix,
decompose_matrix,
perpendicular_directions,
dist_to_corner,
is_hemispherical)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_raises, assert_almost_equal,
run_module_suite)
from dipy.testing.spherepoints import sphere_points
from dipy.core.sphere_stats import random_uniform_on_sphere
from itertools import permutations
def test_vector_norm():
    """Check vector_norm over rows, with keepdims, and along axis 0."""
    vecs = np.array([[1, 0, 0],
                     [3, 4, 0],
                     [0, 5, 12],
                     [1, 2, 3]])
    wanted = np.array([1, 5, 13, np.sqrt(14)])
    assert_array_almost_equal(vector_norm(vecs), wanted)
    wanted = wanted.reshape(4, 1)
    assert_array_almost_equal(vector_norm(vecs, keepdims=True), wanted)
    assert_array_almost_equal(vector_norm(vecs.T, axis=0, keepdims=True),
                              wanted.T)
def test_sphere_cart():
    """Round-trip and shape checks for sphere2cart / cart2sphere."""
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    assert_array_almost_equal(xyz, sphere_points.T)
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    assert_array_almost_equal(rs, 10.4)
    xyz = sphere2cart(rs, thetas, phis)
    assert_array_almost_equal(xyz, big_sph_pts.T, decimal=6)
    # test that result shapes match (broadcasting a length-1 slice
    # against a full-length array)
    x, y, z = big_sph_pts.T
    r, theta, phi = cart2sphere(x[:1], y[:1], z)
    assert_equal(r.shape, theta.shape)
    assert_equal(r.shape, phi.shape)
    x, y, z = sphere2cart(r[:1], theta[:1], phi)
    assert_equal(x.shape, y.shape)
    assert_equal(x.shape, z.shape)
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    assert_array_almost_equal(xyz, pt)
    # Test full circle on x=1, y=1, z=1
    x, y, z = sphere2cart(*cart2sphere(1.0, 1.0, 1.0))
    assert_array_almost_equal((x, y, z), (1.0, 1.0, 1.0))
def test_invert_transform():
    """cart2sphere(sphere2cart(...)) must recover the original angles."""
    npts = 100.
    theta = np.arange(npts) / npts * np.pi              # colatitude in [0, pi)
    phi = (np.arange(npts) / npts - .5) * 2 * np.pi     # azimuth in [-pi, pi)
    x, y, z = sphere2cart(1, theta, phi)  # unit radius, so these are unit vecs
    _, theta_back, phi_back = cart2sphere(x, y, z)      # transform back
    assert_array_almost_equal(theta, theta_back)
    assert_array_almost_equal(phi, phi_back)
def test_nearest_pos_semi_def():
    """PSD inputs pass through unchanged; others match known projections."""
    # Already positive semi-definite diagonals: returned as-is.
    for diagonal in ([1, 2, 3], [0, 2, 3], [0, 0, 3]):
        psd = np.diag(np.array(diagonal))
        assert_array_almost_equal(psd, nearest_pos_semi_def(psd))
    # Indefinite / negative inputs with their expected projections.
    cases = [
        ([-1, 2, 3],
         np.array([[0., 0., 0.], [0., 1.75, 0.], [0., 0., 2.75]])),
        ([-1, -2, 3],
         np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 2.]])),
        ([-1.e-11, 0, 1000],
         np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 1000.]])),
        ([-1, -2, -3],
         np.zeros((3, 3))),
    ]
    for diagonal, expected in cases:
        actual = nearest_pos_semi_def(np.diag(np.array(diagonal)))
        assert_array_almost_equal(expected, actual)
def test_cart_distance():
    """Euclidean distances for 2D/3D points, including a broadcast array."""
    assert_array_almost_equal(cart_distance([0, 1], [1, 0]), np.sqrt(2))
    assert_array_almost_equal(cart_distance([1, 0], [-1, 0]), 2)
    first = [2, 1, 0]
    assert_array_almost_equal(cart_distance(first, [0, 1, -2]), np.sqrt(8))
    second = [[0, 1, -2],
              [-2, 1, 0]]
    assert_array_almost_equal(cart_distance(first, second), [np.sqrt(8), 4])
def test_sphere_distance():
    """Compare sphere_distance against a piecewise-linear approximation
    of arc length around a circle, and exercise the error paths."""
    # make a circle, go around...
    radius = 3.2
    n = 5000
    n2 = n // 2
    # pi at point n2 in array
    angles = np.linspace(0, np.pi*2, n, endpoint=False)
    x = np.sin(angles) * radius
    y = np.cos(angles) * radius
    # dists around half circle, including pi
    half_x = x[:n2+1]
    half_y = y[:n2+1]
    half_dists = np.sqrt(np.diff(half_x)**2 + np.diff(half_y)**2)
    # approximate distances from 0 to pi (not including 0)
    csums = np.cumsum(half_dists)
    # concatenated with distances from pi to 0 again
    cdists = np.r_[0, csums, csums[-2::-1]]
    # check approximation close to calculated
    sph_d = sphere_distance([0, radius], np.c_[x, y])
    assert_array_almost_equal(cdists, sph_d)
    # Now check with passed radius
    sph_d = sphere_distance([0, radius], np.c_[x, y], radius=radius)
    assert_array_almost_equal(cdists, sph_d)
    # Check points not on surface raises error when asked for
    assert_raises(ValueError, sphere_distance, [1, 0], [0, 2])
    # Not when check is disabled
    sphere_distance([1, 0], [0, 2], None, False)
    # Error when radii don't match passed radius
    assert_raises(ValueError, sphere_distance, [1, 0], [0, 1], 2.0)
def test_vector_cosine():
    """Cosine similarity on simple vectors, arrays, and its relation to
    Pearson correlation (equal only for zero-mean vectors)."""
    a = [0, 1]
    b = [1, 0]
    assert_array_almost_equal(vector_cosine(a, b), 0)
    assert_array_almost_equal(vector_cosine([1, 0], [-1, 0]), -1)
    assert_array_almost_equal(vector_cosine([1, 0], [1, 1]), 1 / np.sqrt(2))
    assert_array_almost_equal(vector_cosine([2, 0], [-4, 0]), -1)
    pts1 = [2, 1, 0]
    pts2 = [-2, -1, 0]
    assert_array_almost_equal(vector_cosine(pts1, pts2), -1)
    pts2 = [[-2, -1, 0],
            [2, 1, 0]]
    assert_array_almost_equal(vector_cosine(pts1, pts2), [-1, 1])
    # test relationship with correlation
    # not the same if non-zero vector mean
    a = np.random.uniform(size=(100,))
    b = np.random.uniform(size=(100,))
    cc = np.corrcoef(a, b)[0, 1]
    vcos = vector_cosine(a, b)
    assert not np.allclose(cc, vcos)
    # is the same if zero vector mean
    a_dm = a - np.mean(a)
    b_dm = b - np.mean(b)
    vcos = vector_cosine(a_dm, b_dm)
    assert_array_almost_equal(cc, vcos)
def test_lambert_equal_area_projection_polar():
    """Points at colatitude pi/3 project onto the unit circle in the plane."""
    theta = np.repeat(np.pi / 3, 10)
    phi = np.linspace(0, 2 * np.pi, 10)
    projected = lambert_equal_area_projection_polar(theta, phi)
    radii = np.sqrt(np.sum(projected ** 2, axis=1))
    assert_array_almost_equal(radii, np.ones(10))
def test_lambert_equal_area_projection_cart():
    """Project the six axis-aligned unit vectors and check their radii."""
    xyz = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0], [0, -1, 0],
                    [0, 0, -1]])
    # points sit on +/-1 on all 3 axes
    r, theta, phi = cart2sphere(*xyz.T)
    leap = lambert_equal_area_projection_polar(theta, phi)
    r2 = np.sqrt(2)
    assert_array_almost_equal(np.sqrt(np.sum(leap**2, axis=1)),
                              np.array([r2, r2, 0, r2, r2, 2]))
    # x and y =+/-1 map onto circle of radius sqrt(2)
    # z=1 maps to origin, and z=-1 maps to (an arbitrary point on) the
    # outer circle of radius 2
def test_circumradius():
    """circumradius of the triangle (0,2,0), (2,0,0), (0,0,0) is sqrt(0.5)."""
    vertex_a = np.array([0, 2, 0])
    vertex_b = np.array([2, 0, 0])
    vertex_c = np.array([0, 0, 0])
    assert_array_almost_equal(np.sqrt(0.5),
                              circumradius(vertex_a, vertex_b, vertex_c))
def test_vec2vec_rotmat():
    """R = vec2vec_rotmat(a, b) must map a exactly onto b."""
    src = np.array([1, 0, 0])
    for dst in np.array([[0, 0, 1], [-1, 0, 0], [1, 0, 0]]):
        rotation = vec2vec_rotmat(src, dst)
        assert_array_almost_equal(np.dot(rotation, src), dst)
def test_compose_transformations():
    """Composing x-translations of +10, -20, +10 yields the identity;
    fewer than two matrices is an error."""
    shift_fwd = np.eye(4)
    shift_fwd[0, -1] = 10
    shift_back = np.eye(4)
    shift_back[0, -1] = -20
    shift_fwd_again = np.eye(4)
    shift_fwd_again[0, -1] = 10
    composed = compose_transformations(shift_fwd, shift_back, shift_fwd_again)
    assert_array_equal(composed, np.eye(4))
    assert_raises(ValueError, compose_transformations, shift_fwd)
def test_compose_decompose_matrix():
    """decompose_matrix must invert compose_matrix for random
    translate/rotate/shear/scale combinations."""
    for translate in permutations(40 * np.random.rand(3), 3):
        for angles in permutations(np.deg2rad(90 * np.random.rand(3)), 3):
            for shears in permutations(3 * np.random.rand(3), 3):
                for scale in permutations(3 * np.random.rand(3), 3):
                    mat = compose_matrix(translate=translate, angles=angles,
                                         shear=shears, scale=scale)
                    sc, sh, ang, trans, _ = decompose_matrix(mat)
                    assert_array_almost_equal(translate, trans)
                    assert_array_almost_equal(angles, ang)
                    assert_array_almost_equal(shears, sh)
                    assert_array_almost_equal(scale, sc)
def test_perpendicular_directions():
    """Sampled directions must be perpendicular to the input vector and
    evenly spaced by 2*pi/num around it."""
    num = 35
    vectors_v = np.zeros((4, 3))
    for v in range(4):
        theta = random.uniform(0, np.pi)
        phi = random.uniform(0, 2*np.pi)
        vectors_v[v] = sphere2cart(1., theta, phi)
    # last vector fixed to the x-axis as a deterministic case
    vectors_v[3] = [1, 0, 0]
    for vector_v in vectors_v:
        pd = perpendicular_directions(vector_v, num=num, half=False)
        # see if length of pd is equal to the number of intended samples
        assert_equal(num, len(pd))
        # check if all directions are perpendicular to vector v
        for d in pd:
            cos_angle = np.dot(d, vector_v)
            assert_almost_equal(cos_angle, 0)
        # check if directions are sampled by multiples of 2*pi / num
        delta_a = 2. * np.pi / num
        for d in pd[1:]:
            angle = np.arccos(np.dot(pd[0], d))
            rest = angle % delta_a
            if rest > delta_a * 0.99:  # To correct cases of negative error
                rest = rest - delta_a
            assert_almost_equal(rest, 0)
def _rotation_from_angles(r):
R = np.array([[1, 0, 0],
[0, np.cos(r[0]), np.sin(r[0])],
[0, -np.sin(r[0]), np.cos(r[0])]])
R = np.dot(R, np.array([[np.cos(r[1]), 0, np.sin(r[1])],
[0, 1, 0],
[-np.sin(r[1]), 0, np.cos(r[1])]]))
R = np.dot(R, np.array([[np.cos(r[2]), np.sin(r[2]), 0],
[-np.sin(r[2]), np.cos(r[2]), 0],
[0, 0, 1]]))
R = np.linalg.inv(R)
return R
def test_dist_to_corner():
    """dist_to_corner matches the Pythagorean half-diagonal of a voxel and
    is invariant under rotation of the affine."""
    affine = np.eye(4)
    # Calculate the distance with the pythagorean theorem:
    pythagoras = np.sqrt(np.sum((np.diag(affine)[:-1] / 2) ** 2))
    # Compare to calculation with this function:
    assert_array_almost_equal(dist_to_corner(affine), pythagoras)
    # Apply a rotation to the matrix, just to demonstrate the calculation is
    # robust to that:
    R = _rotation_from_angles(np.random.randn(3) * np.pi)
    new_aff = np.vstack([np.dot(R, affine[:3, :]), [0, 0, 0, 1]])
    assert_array_almost_equal(dist_to_corner(new_aff), pythagoras)
def test_is_hemispherical():
    """is_hemispherical distinguishes hemispheres from full spheres and
    rejects malformed input."""
    # Smoke test the ValueError for non-3D vectors
    assert_raises(ValueError, is_hemispherical, np.array(
        [[1, 2, 3, 4], [5, 6, 7, 8]]
    ))
    # Test on hemispherical input (keep only the z > 0 half)
    xyz = random_uniform_on_sphere(n=100, coords='xyz')
    xyz = xyz[xyz[:, 2] > 0]
    assert_equal(is_hemispherical(xyz)[0], True)
    # Test on spherical input
    xyz = random_uniform_on_sphere(n=100, coords='xyz')
    assert_equal(is_hemispherical(xyz)[0], False)
    # Smoke test the ValueError for non unit-vectors
    assert_raises(ValueError, is_hemispherical, xyz * 2.0)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    run_module_suite()
|
|
"""
@package mi.instrument.seabird.sbe16plus_v2.ctdbp_no.driver
@file mi/instrument/seabird/sbe16plus_v2/ctdbp_no/driver.py
@author Tapana Gupta
@brief Driver class for sbe16plus V2 CTD instrument.
"""
__author__ = 'Tapana Gupta'
__license__ = 'Apache 2.0'
import re
import time
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import SampleException
from xml.dom.minidom import parseString
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import OptodeCommands
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import Parameter
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import Command
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SendOptodeCommand
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19Protocol
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19DataParticle
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19StatusParticle
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19ConfigurationParticle
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import OptodeSettingsParticle
from mi.instrument.seabird.sbe16plus_v2.driver import Prompt, SBE16InstrumentDriver, Sbe16plusBaseParticle, \
WAKEUP_TIMEOUT, NEWLINE, TIMEOUT
from mi.core.instrument.protocol_param_dict import ParameterDictType, ParameterDictVisibility
class DataParticleType(BaseEnum):
    """Stream names for the particles published by the CTDBP-NO driver."""
    RAW = CommonDataParticleType.RAW
    CTD_PARSED = 'ctdbp_no_sample'
    DEVICE_STATUS = 'ctdbp_no_status'
    DEVICE_CALIBRATION = 'ctdbp_no_calibration_coefficients'
    DEVICE_HARDWARE = 'ctdbp_no_hardware'
    DEVICE_CONFIGURATION = 'ctdbp_no_configuration'
    OPTODE_SETTINGS = 'ctdbp_no_optode_settings'
class SBE16NODataParticle(SBE19DataParticle):
    """
    Sample particle. Identical to the corresponding CTDPF-Optode particle
    except for the stream name, which we specify here.
    """
    _data_particle_type = DataParticleType.CTD_PARSED
class SBE16NOConfigurationParticle(SBE19ConfigurationParticle):
    """
    Configuration particle. Identical to the corresponding CTDPF-Optode
    particle except for the stream name, which we specify here.
    """
    _data_particle_type = DataParticleType.DEVICE_CONFIGURATION
class SBE16NOStatusParticle(SBE19StatusParticle):
    """
    Status particle. Identical to the corresponding CTDPF-Optode particle
    except for the stream name, which we specify here.
    """
    _data_particle_type = DataParticleType.DEVICE_STATUS
class SBE16NOOptodeSettingsParticle(OptodeSettingsParticle):
    """
    Optode-settings particle. Identical to the corresponding CTDPF-Optode
    particle except for the stream name, which we specify here.
    """
    _data_particle_type = DataParticleType.OPTODE_SETTINGS
class SBE16NOHardwareParticleKey(BaseEnum):
    """Value IDs published in the ctdbp_no_hardware particle."""
    SERIAL_NUMBER = "serial_number"
    FIRMWARE_VERSION = "firmware_version"
    FIRMWARE_DATE = "firmware_date"
    COMMAND_SET_VERSION = "command_set_version"
    PCB_SERIAL_NUMBER = "pcb_serial_number"
    ASSEMBLY_NUMBER = "assembly_number"
    MANUFACTURE_DATE = "manufacture_date"
    TEMPERATURE_SENSOR_SERIAL_NUMBER = 'temp_sensor_serial_number'
    CONDUCTIVITY_SENSOR_SERIAL_NUMBER = 'cond_sensor_serial_number'
    PRESSURE_SENSOR_TYPE = 'pressure_sensor_type'
    PRESSURE_SENSOR_SERIAL_NUMBER = 'quartz_pressure_sensor_serial_number'
    VOLT0_TYPE = 'volt0_type'
    VOLT0_SERIAL_NUMBER = 'volt0_serial_number'
    VOLT1_TYPE = 'volt1_type'
    VOLT1_SERIAL_NUMBER = 'volt1_serial_number'
class SBE16NOHardwareParticle(Sbe16plusBaseParticle):
    """Parses a getHD (<HardwareData>) XML response into the
    ctdbp_no_hardware data particle."""

    _data_particle_type = DataParticleType.DEVICE_HARDWARE

    @staticmethod
    def regex():
        """
        Regular expression to match a getHD response pattern
        (including the trailing NEWLINE)
        @return: regex string
        """
        pattern = r'(<HardwareData.*?</HardwareData>)' + NEWLINE
        return pattern

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        return re.compile(SBE16NOHardwareParticle.regex(), re.DOTALL)

    @staticmethod
    def resp_regex():
        """
        Regular expression to match a getHD response pattern
        (without the trailing NEWLINE)
        @return: regex string
        """
        pattern = r'(<HardwareData.*?</HardwareData>)'
        return pattern

    @staticmethod
    def resp_regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        return re.compile(SBE16NOHardwareParticle.resp_regex(), re.DOTALL)

    # noinspection PyPep8Naming
    def _build_parsed_values(self):
        """
        Parse the raw <HardwareData> XML into a list of
        {value_id, value} dictionaries.
        @throws SampleException If there is a problem with sample creation
        """
        # XML tag and attribute names used in the HardwareData document
        SENSOR = "Sensor"
        TYPE = "type"
        ID = "id"
        PCB_SERIAL_NUMBER = "PCBSerialNum"
        ASSEMBLY_NUMBER = "AssemblyNum"
        SERIAL_NUMBER = "SerialNumber"
        FIRMWARE_VERSION = "FirmwareVersion"
        FIRMWARE_DATE = "FirmwareDate"
        COMMAND_SET_VERSION = "CommandSetVersion"
        PCB_ASSEMBLY = "PCBAssembly"
        MANUFACTURE_DATE = "MfgDate"
        INTERNAL_SENSORS = "InternalSensors"
        TEMPERATURE_SENSOR_ID = "Main Temperature"
        CONDUCTIVITY_SENSOR_ID = "Main Conductivity"
        PRESSURE_SENSOR_ID = "Main Pressure"
        EXTERNAL_SENSORS = "ExternalSensors"
        VOLT0 = "volt 0"
        VOLT1 = "volt 1"

        # check to make sure there is a correct match before continuing
        match = SBE16NOHardwareParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("No regex match of parsed hardware data: [%s]" %
                                  self.raw_data)

        dom = parseString(self.raw_data)
        root = dom.documentElement
        log.debug("root.tagName = %s", root.tagName)
        serial_number = root.getAttribute(SERIAL_NUMBER)
        firmware_version = self._extract_xml_element_value(root, FIRMWARE_VERSION)
        firmware_date = self._extract_xml_element_value(root, FIRMWARE_DATE)
        command_set_version = self._extract_xml_element_value(root, COMMAND_SET_VERSION)
        manufacture_date = self._extract_xml_element_value(root, MANUFACTURE_DATE)

        # collect one serial/assembly number per PCBAssembly element
        pcb_assembly_elements = self._extract_xml_elements(root, PCB_ASSEMBLY)
        pcb_serial_number = []
        pcb_assembly = []
        for assembly in pcb_assembly_elements:
            pcb_serial_number.append(assembly.getAttribute(PCB_SERIAL_NUMBER))
            pcb_assembly.append(assembly.getAttribute(ASSEMBLY_NUMBER))

        # defaults used when a sensor entry is absent from the XML
        # NOTE(review): the volt* serial defaults are ints while the other
        # serial defaults are strings -- presumably historical; confirm.
        temperature_sensor_serial_number = ""
        conductivity_sensor_serial_number = ""
        pressure_sensor_serial_number = ""
        pressure_sensor_type = ""
        volt0_serial_number = 0
        volt0_type = ""
        volt1_serial_number = 0
        volt1_type = ""

        # internal sensors: temperature, conductivity, pressure
        internal_sensors_element = self._extract_xml_elements(root, INTERNAL_SENSORS)[0]
        sensors = self._extract_xml_elements(internal_sensors_element, SENSOR)
        for sensor in sensors:
            sensor_id = sensor.getAttribute(ID)
            if sensor_id == TEMPERATURE_SENSOR_ID:
                temperature_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)
            elif sensor_id == CONDUCTIVITY_SENSOR_ID:
                conductivity_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)
            elif sensor_id == PRESSURE_SENSOR_ID:
                pressure_sensor_serial_number = str(self._extract_xml_element_value(sensor, SERIAL_NUMBER))
                pressure_sensor_type = self._extract_xml_element_value(sensor, TYPE)

        # external sensors: the two auxiliary voltage channels
        external_sensors_element = self._extract_xml_elements(root, EXTERNAL_SENSORS)[0]
        sensors = self._extract_xml_elements(external_sensors_element, SENSOR)
        for sensor in sensors:
            sensor_id = sensor.getAttribute(ID)
            if sensor_id == VOLT0:
                volt0_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)
                volt0_type = self._extract_xml_element_value(sensor, TYPE)
            elif sensor_id == VOLT1:
                volt1_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)
                volt1_type = self._extract_xml_element_value(sensor, TYPE)

        result = [{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.SERIAL_NUMBER,
                   DataParticleKey.VALUE: str(serial_number)},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.FIRMWARE_VERSION,
                   DataParticleKey.VALUE: firmware_version},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.FIRMWARE_DATE,
                   DataParticleKey.VALUE: firmware_date},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.COMMAND_SET_VERSION,
                   DataParticleKey.VALUE: command_set_version},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.MANUFACTURE_DATE,
                   DataParticleKey.VALUE: manufacture_date},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PCB_SERIAL_NUMBER,
                   DataParticleKey.VALUE: pcb_serial_number},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.ASSEMBLY_NUMBER,
                   DataParticleKey.VALUE: pcb_assembly},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.TEMPERATURE_SENSOR_SERIAL_NUMBER,
                   DataParticleKey.VALUE: temperature_sensor_serial_number},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.CONDUCTIVITY_SENSOR_SERIAL_NUMBER,
                   DataParticleKey.VALUE: conductivity_sensor_serial_number},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PRESSURE_SENSOR_SERIAL_NUMBER,
                   DataParticleKey.VALUE: pressure_sensor_serial_number},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PRESSURE_SENSOR_TYPE,
                   DataParticleKey.VALUE: pressure_sensor_type},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT0_SERIAL_NUMBER,
                   DataParticleKey.VALUE: volt0_serial_number},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT0_TYPE,
                   DataParticleKey.VALUE: volt0_type},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT1_SERIAL_NUMBER,
                   DataParticleKey.VALUE: volt1_serial_number},
                  {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT1_TYPE,
                   DataParticleKey.VALUE: volt1_type}]

        return result
class SBE16NOCalibrationParticleKey(BaseEnum):
    """Value IDs published in the ctdbp_no_calibration_coefficients particle."""
    SERIAL_NUMBER = "serial_number"

    # temperature sensor calibration
    TEMP_SENSOR_SERIAL_NUMBER = "temp_sensor_serial_number"
    TEMP_CAL_DATE = "calibration_date_temperature"
    TA0 = "temp_coeff_ta0"
    TA1 = "temp_coeff_ta1"
    TA2 = "temp_coeff_ta2"
    TA3 = "temp_coeff_ta3"
    TOFFSET = "temp_coeff_offset"

    # conductivity sensor calibration
    COND_SENSOR_SERIAL_NUMBER = "cond_sensor_serial_number"
    COND_CAL_DATE = "calibration_date_conductivity"
    CONDG = "cond_coeff_cg"
    CONDH = "cond_coeff_ch"
    CONDI = "cond_coeff_ci"
    CONDJ = "cond_coeff_cj"
    CPCOR = "cond_coeff_cpcor"
    CTCOR = "cond_coeff_ctcor"
    CSLOPE = "cond_coeff_cslope"

    # pressure sensor calibration
    PRES_SERIAL_NUMBER = "pressure_sensor_serial_number"
    PRES_CAL_DATE = "calibration_date_pressure"
    PC1 = "press_coeff_pc1"
    PC2 = "press_coeff_pc2"
    PC3 = "press_coeff_pc3"
    PD1 = "press_coeff_pd1"
    PD2 = "press_coeff_pd2"
    PT1 = "press_coeff_pt1"
    PT2 = "press_coeff_pt2"
    PT3 = "press_coeff_pt3"
    PT4 = "press_coeff_pt4"
    PSLOPE = "press_coeff_pslope"
    POFFSET = "press_coeff_poffset"
    PRES_RANGE = "pressure_sensor_range"

    # external voltage channels and frequency scale factor
    EXT_VOLT0_OFFSET = "ext_volt0_offset"
    EXT_VOLT0_SLOPE = "ext_volt0_slope"
    EXT_VOLT1_OFFSET = "ext_volt1_offset"
    EXT_VOLT1_SLOPE = "ext_volt1_slope"
    EXT_VOLT2_OFFSET = "ext_volt2_offset"
    EXT_VOLT2_SLOPE = "ext_volt2_slope"
    EXT_VOLT3_OFFSET = "ext_volt3_offset"
    EXT_VOLT3_SLOPE = "ext_volt3_slope"
    EXT_VOLT4_OFFSET = "ext_volt4_offset"
    EXT_VOLT4_SLOPE = "ext_volt4_slope"
    EXT_VOLT5_OFFSET = "ext_volt5_offset"
    EXT_VOLT5_SLOPE = "ext_volt5_slope"
    EXT_FREQ = "ext_freq_sf"
class SBE16NOCalibrationParticle(Sbe16plusBaseParticle):
    """
    Routines for parsing raw calibration (GetCC) data into a data particle
    structure. Override the building of values, and the rest should come
    along for free.
    """
    _data_particle_type = DataParticleType.DEVICE_CALIBRATION

    # Particle-parameter -> XML tag lookup. Built once at class-definition
    # time; previously this dict was rebuilt on every _map_param_to_tag call.
    _MAP_PARAM_TO_TAG = {
        SBE16NOCalibrationParticleKey.TEMP_SENSOR_SERIAL_NUMBER: "SerialNum",
        SBE16NOCalibrationParticleKey.TEMP_CAL_DATE: "CalDate",
        SBE16NOCalibrationParticleKey.TA0: "TA0",
        SBE16NOCalibrationParticleKey.TA1: "TA1",
        SBE16NOCalibrationParticleKey.TA2: "TA2",
        SBE16NOCalibrationParticleKey.TA3: "TA3",
        SBE16NOCalibrationParticleKey.TOFFSET: "TOFFSET",
        SBE16NOCalibrationParticleKey.COND_SENSOR_SERIAL_NUMBER: "SerialNum",
        SBE16NOCalibrationParticleKey.COND_CAL_DATE: "CalDate",
        SBE16NOCalibrationParticleKey.CONDG: "G",
        SBE16NOCalibrationParticleKey.CONDH: "H",
        SBE16NOCalibrationParticleKey.CONDI: "I",
        SBE16NOCalibrationParticleKey.CONDJ: "J",
        SBE16NOCalibrationParticleKey.CPCOR: "CPCOR",
        SBE16NOCalibrationParticleKey.CTCOR: "CTCOR",
        SBE16NOCalibrationParticleKey.CSLOPE: "CSLOPE",
        SBE16NOCalibrationParticleKey.PRES_SERIAL_NUMBER: "SerialNum",
        SBE16NOCalibrationParticleKey.PRES_CAL_DATE: "CalDate",
        SBE16NOCalibrationParticleKey.PC1: "PC1",
        SBE16NOCalibrationParticleKey.PC2: "PC2",
        SBE16NOCalibrationParticleKey.PC3: "PC3",
        SBE16NOCalibrationParticleKey.PD1: "PD1",
        SBE16NOCalibrationParticleKey.PD2: "PD2",
        SBE16NOCalibrationParticleKey.PT1: "PT1",
        SBE16NOCalibrationParticleKey.PT2: "PT2",
        SBE16NOCalibrationParticleKey.PT3: "PT3",
        SBE16NOCalibrationParticleKey.PT4: "PT4",
        SBE16NOCalibrationParticleKey.PSLOPE: "PSLOPE",
        SBE16NOCalibrationParticleKey.POFFSET: "POFFSET",
        SBE16NOCalibrationParticleKey.PRES_RANGE: "PRANGE",
        SBE16NOCalibrationParticleKey.EXT_VOLT0_OFFSET: "OFFSET",
        SBE16NOCalibrationParticleKey.EXT_VOLT0_SLOPE: "SLOPE",
        SBE16NOCalibrationParticleKey.EXT_VOLT1_OFFSET: "OFFSET",
        SBE16NOCalibrationParticleKey.EXT_VOLT1_SLOPE: "SLOPE",
        SBE16NOCalibrationParticleKey.EXT_VOLT2_OFFSET: "OFFSET",
        SBE16NOCalibrationParticleKey.EXT_VOLT2_SLOPE: "SLOPE",
        SBE16NOCalibrationParticleKey.EXT_VOLT3_OFFSET: "OFFSET",
        SBE16NOCalibrationParticleKey.EXT_VOLT3_SLOPE: "SLOPE",
        SBE16NOCalibrationParticleKey.EXT_VOLT4_OFFSET: "OFFSET",
        SBE16NOCalibrationParticleKey.EXT_VOLT4_SLOPE: "SLOPE",
        SBE16NOCalibrationParticleKey.EXT_VOLT5_OFFSET: "OFFSET",
        SBE16NOCalibrationParticleKey.EXT_VOLT5_SLOPE: "SLOPE",
        SBE16NOCalibrationParticleKey.EXT_FREQ: "EXTFREQSF",
    }

    @staticmethod
    def regex():
        """Regex matching a full <CalibrationCoefficients> XML block + newline."""
        pattern = r'(<CalibrationCoefficients.*?</CalibrationCoefficients>)' + NEWLINE
        return pattern

    @staticmethod
    def regex_compiled():
        """Compiled regex() (DOTALL because the XML spans multiple lines)."""
        return re.compile(SBE16NOCalibrationParticle.regex(), re.DOTALL)

    @staticmethod
    def resp_regex():
        """Like regex() but without the trailing newline, for command responses."""
        pattern = r'(<CalibrationCoefficients.*?</CalibrationCoefficients>)'
        return pattern

    @staticmethod
    def resp_regex_compiled():
        """Compiled resp_regex() (DOTALL)."""
        return re.compile(SBE16NOCalibrationParticle.resp_regex(), re.DOTALL)

    def _map_param_to_tag(self, parameter_name):
        """Return the XML tag name for a particle parameter (KeyError if unknown)."""
        return self._MAP_PARAM_TO_TAG[parameter_name]

    # noinspection PyPep8Naming
    def _build_parsed_values(self):
        """
        Parse the output of the getCC command.
        @throws SampleException If there is a problem with sample creation
        """
        SERIAL_NUMBER = "SerialNumber"
        CALIBRATION = "Calibration"
        ID = "id"
        TEMPERATURE_SENSOR_ID = "Main Temperature"
        CONDUCTIVITY_SENSOR_ID = "Main Conductivity"
        PRESSURE_SENSOR_ID = "Main Pressure"
        VOLT0 = "Volt 0"
        VOLT1 = "Volt 1"
        VOLT2 = "Volt 2"
        VOLT3 = "Volt 3"
        VOLT4 = "Volt 4"
        VOLT5 = "Volt 5"
        EXTERNAL_FREQUENCY_CHANNEL = "external frequency channel"

        key = SBE16NOCalibrationParticleKey
        # Ordered (particle key, conversion) pairs per Calibration element id;
        # conversion None means "use _get_xml_parameter's default". Order
        # matches the original elif chain exactly.
        sensor_map = {
            TEMPERATURE_SENSOR_ID: (
                (key.TEMP_SENSOR_SERIAL_NUMBER, str), (key.TEMP_CAL_DATE, str),
                (key.TA0, None), (key.TA1, None), (key.TA2, None),
                (key.TA3, None), (key.TOFFSET, None)),
            CONDUCTIVITY_SENSOR_ID: (
                (key.COND_SENSOR_SERIAL_NUMBER, str), (key.COND_CAL_DATE, str),
                (key.CONDG, None), (key.CONDH, None), (key.CONDI, None),
                (key.CONDJ, None), (key.CPCOR, None), (key.CTCOR, None),
                (key.CSLOPE, None)),
            PRESSURE_SENSOR_ID: (
                (key.PRES_SERIAL_NUMBER, str), (key.PRES_CAL_DATE, str),
                (key.PC1, None), (key.PC2, None), (key.PC3, None),
                (key.PD1, None), (key.PD2, None),
                (key.PT1, None), (key.PT2, None), (key.PT3, None),
                (key.PT4, None), (key.PSLOPE, None), (key.POFFSET, None),
                (key.PRES_RANGE, self.float_to_int)),
            VOLT0: ((key.EXT_VOLT0_OFFSET, None), (key.EXT_VOLT0_SLOPE, None)),
            VOLT1: ((key.EXT_VOLT1_OFFSET, None), (key.EXT_VOLT1_SLOPE, None)),
            VOLT2: ((key.EXT_VOLT2_OFFSET, None), (key.EXT_VOLT2_SLOPE, None)),
            VOLT3: ((key.EXT_VOLT3_OFFSET, None), (key.EXT_VOLT3_SLOPE, None)),
            VOLT4: ((key.EXT_VOLT4_OFFSET, None), (key.EXT_VOLT4_SLOPE, None)),
            VOLT5: ((key.EXT_VOLT5_OFFSET, None), (key.EXT_VOLT5_SLOPE, None)),
            EXTERNAL_FREQUENCY_CHANNEL: ((key.EXT_FREQ, None),),
        }

        # check to make sure there is a correct match before continuing
        match = SBE16NOCalibrationParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("No regex match of parsed calibration data: [%s]" %
                                  self.raw_data)
        dom = parseString(self.raw_data)
        root = dom.documentElement
        log.debug("root.tagName = %s", root.tagName)
        serial_number = root.getAttribute(SERIAL_NUMBER)
        result = [{DataParticleKey.VALUE_ID: SBE16NOCalibrationParticleKey.SERIAL_NUMBER,
                   DataParticleKey.VALUE: serial_number}]
        for calibration in self._extract_xml_elements(root, CALIBRATION):
            # Unrecognized calibration ids are silently skipped, as before.
            for param_key, conversion in sensor_map.get(calibration.getAttribute(ID), ()):
                if conversion is None:
                    result.append(self._get_xml_parameter(calibration, param_key))
                else:
                    result.append(self._get_xml_parameter(calibration, param_key, conversion))
        return result
###############################################################################
# Seabird Electronics 16plus V2 NO Driver.
###############################################################################
class InstrumentDriver(SBE16InstrumentDriver):
    """
    InstrumentDriver subclass for the SBE16plus V2 NO instrument.
    Subclasses SingleConnectionInstrumentDriver with connection state machine.
    """

    ########################################################################
    # Superclass overrides for resource query.
    ########################################################################
    def get_resource_params(self):
        """Return the list of device parameter names available."""
        return Parameter.list()

    ########################################################################
    # Protocol builder.
    ########################################################################
    def _build_protocol(self):
        """Construct and install the driver protocol state machine."""
        protocol = SBE16NOProtocol(Prompt, NEWLINE, self._driver_event)
        self._protocol = protocol
###############################################################################
# Seabird Electronics 16plus V2 NO protocol.
###############################################################################
class SBE16NOProtocol(SBE19Protocol):
    """
    Instrument protocol class for SBE16 NO driver.
    Subclasses SBE16Protocol
    """
    def __init__(self, prompts, newline, driver_event):
        """
        SBE16Protocol constructor.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The SBE16 newline.
        @param driver_event Driver process event callback.
        """
        SBE19Protocol.__init__(self, prompts, newline, driver_event)
    @staticmethod
    def sieve_function(raw_data):
        """ The method that splits samples
        Over-ride sieve function to handle additional particles.
        Returns a list of (start, end) index pairs for every particle
        matched anywhere in raw_data, for use by the chunker.
        """
        matchers = []
        return_list = []
        # One compiled matcher per particle type this NO variant can emit.
        matchers.append(SBE16NODataParticle.regex_compiled())
        matchers.append(SBE16NOHardwareParticle.regex_compiled())
        matchers.append(SBE16NOCalibrationParticle.regex_compiled())
        matchers.append(SBE16NOStatusParticle.regex_compiled())
        matchers.append(SBE16NOConfigurationParticle.regex_compiled())
        matchers.append(SBE16NOOptodeSettingsParticle.regex_compiled())
        for matcher in matchers:
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))
        return return_list
    def _got_chunk(self, chunk, timestamp):
        """
        Over-ride sieve function to handle additional particles.
        The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
        with the appropriate particle objects and REGEXes.
        """
        # A data sample also marks the instrument as actively sampling.
        if self._extract_sample(SBE16NODataParticle, SBE16NODataParticle.regex_compiled(), chunk, timestamp):
            self._sampling = True
            return
        # Otherwise try each status-style particle until one matches.
        for particle_class in SBE16NOHardwareParticle, \
                              SBE16NOCalibrationParticle, \
                              SBE16NOConfigurationParticle, \
                              SBE16NOStatusParticle, \
                              SBE16NOOptodeSettingsParticle:
            if self._extract_sample(particle_class, particle_class.regex_compiled(), chunk, timestamp):
                return
    ########################################################################
    # Command handlers.
    ########################################################################
    def _handler_command_acquire_status(self, *args, **kwargs):
        """
        Get device status while in command state: GetSD, GetHD, GetCD,
        GetCC, GetEC, then interrogate and restart the attached Optode.
        Returns (None, (None, joined response text)).
        """
        result = []
        result.append(self._do_cmd_resp(Command.GET_SD, response_regex=SBE16NOStatusParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetSD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_HD, response_regex=SBE16NOHardwareParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetHD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CD, response_regex=SBE16NOConfigurationParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetCD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CC, response_regex=SBE16NOCalibrationParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetCC Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_EC, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetEC Response: %s", result)
        #Reset the event counter right after getEC
        self._do_cmd_resp(Command.RESET_EC, timeout=TIMEOUT)
        #Now send commands to the Optode to get its status
        #Stop the optode first, need to send the command twice
        stop_command = "stop"
        start_command = "start"
        self._do_cmd_resp(OptodeCommands.SEND_OPTODE, stop_command, timeout=TIMEOUT)
        # Sleeps give the optode time to act on each stop command.
        time.sleep(2)
        self._do_cmd_resp(OptodeCommands.SEND_OPTODE, stop_command, timeout=TIMEOUT)
        time.sleep(3)
        #Send all the 'sendoptode=' commands one by one
        optode_commands = SendOptodeCommand.list()
        for command in optode_commands:
            log.debug("Sending optode command: %s" % command)
            result.append(self._do_cmd_resp(OptodeCommands.SEND_OPTODE, command, timeout=TIMEOUT))
            log.debug("_handler_command_acquire_status: SendOptode Response: %s", result)
        #restart the optode
        self._do_cmd_resp(OptodeCommands.SEND_OPTODE, start_command, timeout=TIMEOUT)
        return None, (None, ''.join(result))
    def _handler_autosample_acquire_status(self, *args, **kwargs):
        """
        Get device status while autosampling (no optode interrogation here).
        Returns (None, (None, joined response text)).
        """
        result = []
        # When in autosample this command requires two wakeups to get to the right prompt
        self._wakeup(timeout=WAKEUP_TIMEOUT)
        self._wakeup(timeout=WAKEUP_TIMEOUT)
        result.append(self._do_cmd_resp(Command.GET_SD, response_regex=SBE16NOStatusParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_autosample_acquire_status: GetSD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_HD, response_regex=SBE16NOHardwareParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_autosample_acquire_status: GetHD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CD, response_regex=SBE16NOConfigurationParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_autosample_acquire_status: GetCD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CC, response_regex=SBE16NOCalibrationParticle.regex_compiled(),
                                        timeout=TIMEOUT))
        log.debug("_handler_autosample_acquire_status: GetCC Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_EC, timeout=TIMEOUT))
        log.debug("_handler_autosample_acquire_status: GetEC Response: %s", result)
        #Reset the event counter right after getEC
        self._do_cmd_no_resp(Command.RESET_EC)
        return None, (None, ''.join(result))
    ########################################################################
    # response handlers.
    ########################################################################
    def _validate_GetCD_response(self, response, prompt):
        """
        validation handler for GetCD command
        @param response command response string.
        @param prompt prompt following command response.
        @return the response string, after updating the parameter dict.
        @throws InstrumentProtocolException if command misunderstood.
        """
        error = self._find_error(response)
        if error:
            log.error("GetCD command encountered error; type='%s' msg='%s'", error[0], error[1])
            raise InstrumentProtocolException('GetCD command failure: type="%s" msg="%s"' % (error[0], error[1]))
        if not SBE16NOConfigurationParticle.resp_regex_compiled().search(response):
            log.error('_validate_GetCD_response: GetCD command not recognized: %s.' % response)
            raise InstrumentProtocolException('GetCD command not recognized: %s.' % response)
        self._param_dict.update_many(response)
        return response
    def _validate_GetCC_response(self, response, prompt):
        """
        validation handler for GetCC command
        @param response command response string.
        @param prompt prompt following command response.
        @return the response string (no param-dict update for calibration).
        @throws InstrumentProtocolException if command misunderstood.
        """
        error = self._find_error(response)
        if error:
            log.error("GetCC command encountered error; type='%s' msg='%s'", error[0], error[1])
            raise InstrumentProtocolException('GetCC command failure: type="%s" msg="%s"' % (error[0], error[1]))
        if not SBE16NOCalibrationParticle.resp_regex_compiled().search(response):
            log.error('_validate_GetCC_response: GetCC command not recognized: %s.' % response)
            raise InstrumentProtocolException('GetCC command not recognized: %s.' % response)
        return response
    def _validate_GetHD_response(self, response, prompt):
        """
        validation handler for GetHD command
        @param response command response string.
        @param prompt prompt following command response.
        @return the response string, after updating the parameter dict.
        @throws InstrumentProtocolException if command misunderstood.
        """
        error = self._find_error(response)
        if error:
            log.error("GetHD command encountered error; type='%s' msg='%s'", error[0], error[1])
            raise InstrumentProtocolException('GetHD command failure: type="%s" msg="%s"' % (error[0], error[1]))
        if not SBE16NOHardwareParticle.resp_regex_compiled().search(response):
            log.error('_validate_GetHD_response: GetHD command not recognized: %s.' % response)
            raise InstrumentProtocolException('GetHD command not recognized: %s.' % response)
        self._param_dict.update_many(response)
        return response
    def _build_ctd_specific_params(self):
        """
        Register CTD-specific parameters. PTYPE is parsed from the GetHD XML
        (<Sensor id='Main Pressure'>...<type>), is immutable, and defaults
        to 3 (quartz with temperature compensation).
        """
        self._param_dict.add(Parameter.PTYPE,
                             r"<Sensor id = 'Main Pressure'>.*?<type>(.*?)</type>.*?</Sensor>",
                             self._pressure_sensor_to_int,
                             str,
                             type=ParameterDictType.INT,
                             display_name="Pressure Sensor Type",
                             startup_param=True,
                             direct_access=True,
                             default_value=3,
                             description="Sensor type: (1:strain gauge | 3:quartz with temp comp)",
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             regex_flags=re.DOTALL)
def create_playback_protocol(callback):
    """Build a protocol instance for playback: no prompts or newline needed."""
    return SBE16NOProtocol(prompts=None, newline=None, driver_event=callback)
|
|
#!/usr/bin/python
# Quarry frontend server: argument parsing, logging, optional SASS watcher,
# and Flask app construction. Order matters: args must be parsed before the
# watcher/app are set up.
# Standard Library
import argparse
import datetime
import json
import logging
import os
import subprocess
from functools import wraps
# Third Party
import requests
import mixingboard
from chassis.database import db_session
from chassis.models import User, Account, JobHistory, Notification, Token
from flask import Flask, redirect, jsonify, render_template, request, \
    session, url_for
from sqlalchemy import or_
# Local
from api.main import api
# parse args
argParser = argparse.ArgumentParser(description='Run the Quarry server.')
argParser.add_argument('-d', '--debug', action='store_true', help='Turn on debug mode')
argParser.add_argument('-p', '--port', type=int, default=9000, help='Set the port')
# NOTE(review): help text says "Set the port" but this flag sets the host.
argParser.add_argument('-H', '--host', type=str, default='127.0.0.1', help='Set the port')
argParser.add_argument('--no-sass', action='store_true', help='Disable sass compilation/watching')
args, _ = argParser.parse_known_args()
# put args in sensible all caps variables
DEBUG = args.debug
HOST = args.host
PORT = args.port
NO_SASS = args.no_sass
# set up logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# start the sass watcher if needed (debug only); fire-and-forget child process
if DEBUG and not NO_SASS:
    logger.info("Starting SASS watcher...")
    directory = os.path.dirname(os.path.abspath(__file__))
    subprocess.Popen("sass --watch -l %s/static/sass/style.sass:%s/static/css/style.css" % (directory, directory), shell=True)
# create flask app
app = Flask(__name__, static_url_path='/static', static_folder='./static')
# NOTE(review): hard-coded session secret in source control; should be
# loaded from config/environment instead.
app.secret_key = 'm8ZqboHDT6u75pP1QvK4nk6R8Z6/4SyeDUTXVdIGN9'
app.register_blueprint(api, url_prefix='/beta')
@app.context_processor
def override_url_for():
    """Expose the cache-busting dated_url_for as url_for in all templates."""
    return {"url_for": dated_url_for}
def dated_url_for(endpoint, **values):
    """
    url_for wrapper that appends a cache-busting ``q`` query parameter
    (the file's mtime) to static asset URLs.
    """
    if endpoint == 'static':
        filename = values.get('filename', None)
        if filename:
            file_path = os.path.join(app.root_path, endpoint, filename)
            # Guard against missing files: os.stat would raise OSError and
            # turn one bad static reference into a 500 for the whole page.
            if os.path.isfile(file_path):
                values['q'] = int(os.stat(file_path).st_mtime)
    return url_for(endpoint, **values)
@app.teardown_appcontext
def shutdown_session(exception=None):
    # Release the scoped SQLAlchemy session at the end of every request.
    db_session.remove()
# authentication stuff
def requiresLogin(fn):
    """Decorator: reject requests whose session has no authenticated user."""
    @wraps(fn)
    def authFn(*args, **kwargs):
        # Guard clause: bail out early when there is no logged-in user.
        # NOTE(review): 401 would be the conventional status; 400 kept to
        # preserve the existing client contract.
        if 'user' not in session:
            return jsonify({
                "error": "This method requires an authenticated user"
            }), 400
        return fn(*args, **kwargs)
    return authFn
@app.route('/api/histories/<jobType>')
@requiresLogin
def history(jobType):
    """
    Retrieve job history for the logged-in user's account.

    Query params:
        offset: pagination offset (default 0)
        count: page size (default 20)
    Returns:
        a json object containing a list of job history entries
    """
    # Fix: this route read session['user'] without @requiresLogin, so an
    # anonymous request raised KeyError (HTTP 500) instead of a clean error.
    account = session['user']['account']['id']
    user = session['user']['id']  # TODO allow filtering to individual user
    offset = int(request.args.get("offset", 0))
    count = int(request.args.get("count", 20))
    query = (JobHistory.query
             .filter(JobHistory.account_id == account, JobHistory.job_type == jobType)
             .order_by(JobHistory.created.desc())
             .limit(count).offset(offset))
    histories = [h.dict() for h in query]
    return jsonify({
        "histories": histories
    })
@app.route('/api/notifications')
@requiresLogin
def notifications():
    """
    Retrieve unread notifications for the logged-in user's account —
    both user-specific and account-wide (user_id IS NULL) ones.

    Query params:
        offset: pagination offset (default 0)
        count: page size (default 20)
    Returns:
        a json object containing a list of notifications
    """
    # Fix: missing @requiresLogin meant anonymous requests 500ed on the
    # session lookup below.
    account = session['user']['account']['id']
    user = session['user']['id']
    offset = int(request.args.get("offset", 0))
    count = int(request.args.get("count", 20))
    # .is_(None) is the documented SQLAlchemy idiom for IS NULL.
    query = (Notification.query
             .filter(Notification.account_id == account,
                     or_(Notification.user_id == user, Notification.user_id.is_(None)),
                     Notification.read.is_(None))
             .order_by(Notification.created.desc())
             .limit(count).offset(offset))
    notifications = [n.dict() for n in query]
    return jsonify({
        "notifications": notifications
    })
@app.route('/api/notification/<notificationId>/read', methods=["POST"])
@requiresLogin
def notification_read(notificationId):
    """
    Mark a notification as read.

    Returns:
        a json object containing the updated notification, or a 404 when
        no matching notification exists for this account/user.
    """
    # Fix: missing @requiresLogin (anonymous -> KeyError/500).
    account = session['user']['account']['id']
    user = session['user']['id']
    notification = Notification.query.filter(
        Notification.account_id == account,
        or_(Notification.user_id == user, Notification.user_id.is_(None)),
        Notification.id == notificationId).first()
    # Fix: .first() can return None; previously markRead() then raised
    # AttributeError (HTTP 500) for unknown ids.
    if notification is None:
        return jsonify({
            "error": "No such notification"
        }), 404
    notification.markRead()
    db_session.add(notification)
    db_session.commit()
    return jsonify({
        "notification": notification.dict()
    })
@app.route('/api/jaunt/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@requiresLogin
def jaunt(path):
    """
    Forward a request to the jaunt service, augmenting both the query
    string and the form with account/user identity and AWS storage
    configuration. Mostly a passthrough.
    """
    account = session['user']['account']['id']
    user = session['user']['id']
    accountObj = Account.query.filter(Account.id == account).first()
    warehouse_dir = "/user/%s/shark/warehouse" % accountObj.iam_username
    # NOTE(review): AWS credentials are forwarded in the query string as
    # well as the form; query strings tend to end up in logs.
    extra = {
        'account': account,
        'user': user,
        'awsKey': accountObj.access_key_id,
        'awsSecret': accountObj.access_key_secret,
        's3Bucket': mixingboard.getConf("s3_bucket"),
        'warehouseDir': warehouse_dir,
    }
    args = dict(request.args.items())
    args.update(extra)
    form = dict(request.form.items())
    form.update(extra)
    url = "".join([JAUNT_URL_FORMAT, path])
    res = getattr(requests, request.method.lower())(url, params=args, data=form)
    return res.text, res.status_code
@app.route('/api/lego/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@requiresLogin
def lego(path):
    """
    Forward a request to the lego service, tagging the query string and
    form with the caller's account and user ids. Mostly a passthrough.
    """
    identity = {
        'account': session['user']['account']['id'],
        'user': session['user']['id'],
    }
    args = dict(request.args.items())
    args.update(identity)
    form = dict(request.form.items())
    form.update(identity)
    url = "".join([LEGO_URL_FORMAT, path])
    res = getattr(requests, request.method.lower())(url, params=args, data=form)
    return res.text, res.status_code
@app.route('/api/shark/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@requiresLogin
def shark(path):
    """
    Forward a request to the shark service, tagging the query string and
    form with the caller's account and user ids. Mostly a passthrough.
    """
    identity = {
        'account': session['user']['account']['id'],
        'user': session['user']['id'],
    }
    args = dict(request.args.items())
    args.update(identity)
    form = dict(request.form.items())
    form.update(identity)
    url = "".join([SHARK_URL_FORMAT, path])
    res = getattr(requests, request.method.lower())(url, params=args, data=form)
    return res.text, res.status_code
@app.route('/api/redshirt/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@requiresLogin
def redshirt(path):
    """
    Forward a request to the redshirt service, tagging the query string
    and form with the caller's account and user ids. Mostly a passthrough.
    """
    identity = {
        'account': session['user']['account']['id'],
        'user': session['user']['id'],
    }
    args = dict(request.args.items())
    args.update(identity)
    form = dict(request.form.items())
    form.update(identity)
    url = "".join([REDSHIRT_URL_FORMAT, path])
    res = getattr(requests, request.method.lower())(url, params=args, data=form)
    return res.text, res.status_code
@app.route('/api/flint/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@requiresLogin
def flint(path):
    """
    Forward a request to the flint service, tagging the query string and
    form with the caller's account and user ids. Mostly a passthrough.
    (Original docstring mistakenly said "redshirt".)
    """
    identity = {
        'account': session['user']['account']['id'],
        'user': session['user']['id'],
    }
    args = dict(request.args.items())
    args.update(identity)
    form = dict(request.form.items())
    form.update(identity)
    url = "".join([FLINT_URL_FORMAT, path])
    res = getattr(requests, request.method.lower())(url, params=args, data=form)
    return res.text, res.status_code
@app.route('/api/account')
@requiresLogin
def account():
    """Return the logged-in user's account record."""
    acct = Account.query.filter(Account.id == session['user']['account_id']).first()
    return jsonify({"account": acct.dict()})
@app.route('/api/account/secret')
@requiresLogin
def account_secret():
    """Return the account's IAM username (exposed to clients as 'secret')."""
    acct = Account.query.filter(Account.id == session['user']['account_id']).first()
    return jsonify({"secret": acct.iam_username})
@app.route('/api/account/storage')
@requiresLogin
def account_storage():
    """Return the account's current storage usage."""
    acct = Account.query.filter(Account.id == session['user']['account_id']).first()
    return jsonify({"storageUsed": acct.getStorageUsage()})
@app.route('/api/account/users')
@requiresLogin
def account_users():
    """Return every user belonging to the logged-in user's account."""
    acct = Account.query.filter(Account.id == session['user']['account_id']).first()
    return jsonify({"users": [member.dict() for member in acct.users]})
@app.route('/api/user/me')
@requiresLogin
def user_me():
    """Refresh the session's user data from the database and return it."""
    current = User.query.filter(User.id == session['user']['id']).first()
    session['user'] = current.dict()
    session['user']['account'] = current.account.dict()
    return jsonify({"user": session['user']})
# Shared invite code gating signup. NOTE(review): a plain string compare is
# not constant-time; acceptable for an invite code, not for real secrets.
INVITE_CODE = '3fYsq96iSquvmRsMTzkdg'


@app.route('/api/signup', methods=['POST'])
def signup():
    """
    Create a new account and its first user from the signup form.

    Form params: name, email, password, inviteCode, organization
    Returns: json with the created user, or a 400 error.
    """
    name = request.form['name']
    email = request.form['email']
    password = request.form['password']
    inviteCode = request.form['inviteCode']
    organization = request.form['organization']
    if inviteCode != INVITE_CODE:
        return jsonify({
            "error": "Invalid invite code. If you were given an invite code, email hello@quarry.io for help."
        }), 400
    # FIXME allow users to be added to existing accounts
    account = Account(organization)
    db_session.add(account)
    db_session.commit()
    try:
        user = User(name=name, email=email, password=password, accountId=account.id)
    except Exception as e:
        # Fix: remove the account committed above; previously a failed user
        # creation left an orphaned Account row behind.
        db_session.delete(account)
        db_session.commit()
        # Fix: str(e) instead of e.message (removed in Python 3).
        return jsonify({
            "error": str(e)
        }), 400
    db_session.add(user)
    db_session.commit()
    session['user'] = user.dict()
    session['user']['account'] = user.account.dict()
    return jsonify({
        "user": user.dict()
    })
@app.route('/api/login', methods=['POST'])
def login():
    """Authenticate by email/password and populate the session on success."""
    email = request.form['email']
    password = request.form['password']
    user = User.query.filter(User.email == email).first()
    # Guard clauses instead of nested if/else; behavior unchanged.
    if user is None:
        return jsonify({
            "error": "No user exists with that email"
        }), 400
    if not user.checkPassword(password):
        return jsonify({
            "error": "Your password is incorrect"
        }), 400
    session['user'] = user.dict()
    session['user']['account'] = user.account.dict()
    return jsonify({
        "success": True,
        "message": "Successfully logged in",
        "user": session['user']
    })
@app.route('/api/account/update', methods=['POST'])
@requiresLogin
def update_account():
    """
    Update the logged-in user's account from posted form fields and
    refresh the session copy.
    """
    # Fix: this route read the session without @requiresLogin, so anonymous
    # requests raised KeyError (HTTP 500).
    account = Account.query.filter(Account.id == session['user']['account']['id']).first()
    # NOTE(review): this mass-assigns arbitrary form keys onto the model,
    # which lets a client overwrite any column (e.g. AWS credentials).
    # Left as-is to preserve the API; should be restricted to a whitelist.
    for key, value in request.form.items():
        setattr(account, key, value)
    db_session.add(account)
    db_session.commit()
    session['user']['account'] = account.dict()
    return jsonify({
        "account": session['user']['account']
    })
@app.route('/api/user/me/update', methods=['POST'])
@requiresLogin
def update_user():
    """
    Update the logged-in user from posted form fields and refresh the
    session copy.
    """
    # Fix: missing @requiresLogin (anonymous -> KeyError/500).
    user = User.query.filter(User.id == session['user']['id']).first()
    # NOTE(review): mass assignment of arbitrary form keys — should be
    # restricted to a whitelist of editable columns.
    for key, value in request.form.items():
        setattr(user, key, value)
    db_session.add(user)
    db_session.commit()
    session['user'] = user.dict()
    session['user']['account'] = user.account.dict()
    return jsonify({
        "user": session['user']
    })
@app.route('/api/logout')
def logout():
    """Clear the session user and redirect to the landing page."""
    # Fix: del session['user'] raised KeyError (HTTP 500) when the caller
    # was not logged in; pop() makes logout idempotent.
    session.pop('user', None)
    return redirect("/")
@app.route('/<path:path>')
def reroute(path):
    """Redirect plain server paths into the client-side (#/) router."""
    return redirect("/#/%s" % path)
@app.route('/')
def index():
    """Render the SPA shell, refreshing session user data when logged in."""
    # 'null' doubles as the "not logged in" sentinel here.
    user = session.get('user', 'null')
    if user != 'null':
        user = User.query.filter(User.id == session['user']['id']).first()
        if user:
            session['user'] = user.dict()
            session['user']['account'] = user.account.dict()
            # 'u' instead of re-binding 'user' inside the comprehension.
            session['user']['account']['users'] = [u.dict() for u in user.account.users]
    return render_template('base.html', user=json.dumps(session.get('user', None)))
# setup shark configurations
global SHARK_URL_FORMAT
SHARK_URL_FORMAT = None
def setSharkURLFormat(sharkServers):
    """Service-discovery callback: record the first shark server's base URL."""
    global SHARK_URL_FORMAT
    first = sharkServers[0]
    SHARK_URL_FORMAT = "http://%s:%s/shark/" % (first["host"], first["port"])
    logging.info("GOT SHARK SERVICE: %s" % SHARK_URL_FORMAT)
mixingboard.discoverService("shark", setSharkURLFormat)
# setup lego configurations
global LEGO_URL_FORMAT
LEGO_URL_FORMAT = None
def setLegoURLFormat(legoServers):
    """Service-discovery callback: record the first lego server's base URL."""
    global LEGO_URL_FORMAT
    first = legoServers[0]
    LEGO_URL_FORMAT = "http://%s:%s/lego/" % (first["host"], first["port"])
    logging.info("GOT LEGO SERVICE: %s" % LEGO_URL_FORMAT)
mixingboard.discoverService("lego", setLegoURLFormat)
# setup jaunt configurations
global JAUNT_URL_FORMAT
JAUNT_URL_FORMAT = None
def setJauntURLFormat(jauntServers):
    """Service-discovery callback: record the first jaunt server's base URL."""
    global JAUNT_URL_FORMAT
    first = jauntServers[0]
    JAUNT_URL_FORMAT = "http://%s:%s/jaunt/" % (first["host"], first["port"])
    logging.info("GOT JAUNT SERVICE: %s" % JAUNT_URL_FORMAT)
mixingboard.discoverService("jaunt", setJauntURLFormat)
# setup redshirt configurations
global REDSHIRT_URL_FORMAT
REDSHIRT_URL_FORMAT = None
def setRedShirtURLFormat(redShirtServers):
    """Service-discovery callback: record the first redshirt server's base URL."""
    # Fix: this callback was named setSharkURLFormat, silently shadowing the
    # shark discovery callback defined above. The name is only used on the
    # next line, so the rename is safe.
    global REDSHIRT_URL_FORMAT
    redShirt = redShirtServers[0]
    REDSHIRT_URL_FORMAT = "http://%s:%s/redshirt/" % (redShirt["host"], redShirt["port"])
    logging.info("GOT REDSHIRT SERVICE: %s" % REDSHIRT_URL_FORMAT)
mixingboard.discoverService("redshirt", setRedShirtURLFormat)
# setup flint configurations
global FLINT_URL_FORMAT
FLINT_URL_FORMAT = None
def setFlintURLFormat(flintServers):
    """Service-discovery callback: record the first flint server's base URL."""
    global FLINT_URL_FORMAT
    first = flintServers[0]
    FLINT_URL_FORMAT = "http://%s:%s/flint/" % (first["host"], first["port"])
    logging.info("GOT FLINT SERVICE: %s" % FLINT_URL_FORMAT)
mixingboard.discoverService("flint", setFlintURLFormat)
if __name__ == "__main__":
    # Advertise this frontend via service discovery, then start serving.
    mixingboard.exposeService("frontend", port=PORT)
    app.run(debug=DEBUG, port=PORT, host=HOST, threaded=True)
|
|
#!/usr/bin/env python
# High-level Python helpers over the low-level LIBSVM 'svm' bindings.
import os
import sys
from svm import *
from svm import __all__ as svm_all
# Public API: the helpers defined below plus everything re-exported from svm.
__all__ = ['evaluations', 'svm_load_model', 'svm_predict', 'svm_read_problem',
           'svm_save_model', 'svm_train'] + svm_all
# Put this package's directory first on sys.path.
# NOTE(review): this runs *after* 'from svm import *' above, so it only
# affects later imports — confirm that ordering is intended.
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path
def svm_read_problem(data_file_name):
    """
    svm_read_problem(data_file_name) -> [y, x]

    Read LIBSVM-format data from data_file_name and return labels y
    (list of floats) and data instances x (list of {index: value} dicts).
    """
    prob_y = []
    prob_x = []
    # Fix: 'with' guarantees the file handle is closed; the original opened
    # the file in the for-statement and leaked the handle.
    with open(data_file_name) as data_file:
        for line in data_file:
            line = line.split(None, 1)
            # In case an instance with all zero features
            if len(line) == 1:
                line += ['']
            label, features = line
            xi = {}
            for e in features.split():
                ind, val = e.split(":")
                xi[int(ind)] = float(val)
            prob_y += [float(label)]
            prob_x += [xi]
    return (prob_y, prob_x)
def svm_load_model(model_file_name):
    """
    svm_load_model(model_file_name) -> model

    Load a LIBSVM model from model_file_name; returns None (after printing
    a message) when the file cannot be opened.
    """
    raw_model = libsvm.svm_load_model(model_file_name.encode())
    if not raw_model:
        print("can't open model file %s" % model_file_name)
        return None
    return toPyModel(raw_model)
def svm_save_model(model_file_name, model):
    """
    svm_save_model(model_file_name, model) -> None

    Save a LIBSVM model to the file model_file_name.
    """
    # The C API expects a bytes path.
    libsvm.svm_save_model(model_file_name.encode(), model)
def evaluations(ty, pv):
    """
    evaluations(ty, pv) -> (ACC, MSE, SCC)

    Calculate accuracy, mean squared error and squared correlation coefficient
    using the true values (ty) and predicted values (pv).

    Raises ValueError if ty and pv differ in length.  SCC is NaN when either
    series has zero variance (the correlation is undefined there).
    """
    if len(ty) != len(pv):
        raise ValueError("len(ty) must equal to len(pv)")
    total_correct = total_error = 0
    sumv = sumy = sumvv = sumyy = sumvy = 0
    for v, y in zip(pv, ty):
        if y == v:
            total_correct += 1
        total_error += (v-y)*(v-y)
        sumv += v
        sumy += y
        sumvv += v*v
        sumyy += y*y
        sumvy += v*y
    l = len(ty)
    ACC = 100.0*total_correct/l
    MSE = total_error/l
    try:
        SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
    except ZeroDivisionError:
        # Previously a bare `except:` — only a zero denominator (zero
        # variance in ty or pv) should map to NaN; anything else is a bug
        # that must propagate.
        SCC = float('nan')
    return (ACC, MSE, SCC)
def svm_train(arg1, arg2=None, arg3=None, arg4=None):
    """
    svm_train(W, y, x [, options]) -> model | ACC | MSE
    svm_train(prob [, options]) -> model | ACC | MSE
    svm_train(prob, param) -> model | ACC| MSE

    Train an SVM model from weighted data (W, y, x) or an svm_problem prob using
    'options' or an svm_parameter param.
    If '-v' is specified in 'options' (i.e., cross validation)
    either accuracy (ACC) or mean-squared error (MSE) is returned.

    options:
        -s svm_type : set type of SVM (default 0)
            0 -- C-SVC (multi-class classification)
            1 -- nu-SVC (multi-class classification)
            2 -- one-class SVM
            3 -- epsilon-SVR (regression)
            4 -- nu-SVR (regression)
        -t kernel_type : set type of kernel function (default 2)
            0 -- linear: u'*v
            1 -- polynomial: (gamma*u'*v + coef0)^degree
            2 -- radial basis function: exp(-gamma*|u-v|^2)
            3 -- sigmoid: tanh(gamma*u'*v + coef0)
            4 -- precomputed kernel (kernel values in training_set_file)
        -d degree : set degree in kernel function (default 3)
        -g gamma : set gamma in kernel function (default 1/num_features)
        -r coef0 : set coef0 in kernel function (default 0)
        -c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
        -n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
        -p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
        -m cachesize : set cache memory size in MB (default 100)
        -e epsilon : set tolerance of termination criterion (default 0.001)
        -h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
        -b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
        -wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
        -v n: n-fold cross validation mode
        -q : quiet mode (no outputs)
    """
    prob, param = None, None
    if isinstance(arg1, (list, tuple)):
        # Raw-data form: svm_train(W, y, x[, options]).
        assert isinstance(arg2, (list, tuple))
        assert isinstance(arg3, list)
        W, y, x, options = arg1, arg2, arg3, arg4
        param = svm_parameter(options)
        prob = svm_problem(W, y, x, isKernel=(param.kernel_type == PRECOMPUTED))
    elif isinstance(arg1, svm_problem):
        # Problem form: the second argument is an svm_parameter or an
        # option string (possibly None).
        prob = arg1
        if isinstance(arg2, svm_parameter):
            param = arg2
        else:
            param = svm_parameter(arg2)
    if prob is None or param is None:
        raise TypeError("Wrong types for the arguments")

    # For a precomputed kernel, the first "feature" of every instance must
    # be 0:serial_number with 1 <= serial_number <= number of instances.
    if param.kernel_type == PRECOMPUTED:
        for xi in prob.x_space:
            idx, val = xi[0].index, xi[0].value
            if idx != 0:
                raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
            if val <= 0 or val > prob.n:
                raise ValueError('Wrong input format: sample_serial_number out of range')

    # Default gamma is 1/num_features when it was left unset (0).
    if param.gamma == 0 and prob.n > 0:
        param.gamma = 1.0 / prob.n
    libsvm.svm_set_print_string_function(param.print_func)
    err_msg = libsvm.svm_check_parameter(prob, param)
    if err_msg:
        raise ValueError('Error: %s' % err_msg)

    if param.cross_validation:
        l, nr_fold = prob.l, param.nr_fold
        target = (c_double * l)()
        libsvm.svm_cross_validation(prob, param, nr_fold, target)
        ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
        if param.svm_type in [EPSILON_SVR, NU_SVR]:
            print("Cross Validation Mean squared error = %g" % MSE)
            print("Cross Validation Squared correlation coefficient = %g" % SCC)
            return MSE
        else:
            print("Cross Validation Accuracy = %g%%" % ACC)
            return ACC
    else:
        m = libsvm.svm_train(prob, param)
        m = toPyModel(m)

        # If prob is destroyed, data including SVs pointed by m can remain.
        m.x_space = prob.x_space
        return m
def svm_predict(y, x, m, options=""):
    """
    svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)

    Predict data (y, x) with the SVM model m.
    options:
        -b probability_estimates: whether to predict probability estimates,
            0 or 1 (default 0); for one-class SVM only 0 is supported.
        -q : quiet mode (no outputs).

    The return tuple contains
    p_labels: a list of predicted labels
    p_acc: a tuple including accuracy (for classification), mean-squared
        error, and squared correlation coefficient (for regression).
    p_vals: a list of decision values or probability estimates (if '-b 1'
        is specified). If k is the number of classes, for decision values,
        each element includes results of predicting k(k-1)/2 binary-class
        SVMs. For probabilities, each element contains k values indicating
        the probability that the testing instance is in each class.
        Note that the order of classes here is the same as 'model.label'
        field in the model structure.
    """
    # Default output sink; swapped for print_null in quiet (-q) mode.
    def info(s):
        print(s)

    predict_probability = 0
    argv = options.split()
    i = 0
    while i < len(argv):
        if argv[i] == '-b':
            i += 1
            predict_probability = int(argv[i])
        elif argv[i] == '-q':
            info = print_null
        else:
            raise ValueError("Wrong options")
        i += 1

    svm_type = m.get_svm_type()
    is_prob_model = m.is_probability_model()
    nr_class = m.get_nr_class()
    pred_labels = []
    pred_values = []

    if predict_probability:
        if not is_prob_model:
            raise ValueError("Model does not support probability estimates")

        if svm_type in [NU_SVR, EPSILON_SVR]:
            info("Prob. model for test data: target value = predicted value + z,\n"
                 "z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability())
            nr_class = 0

        prob_estimates = (c_double * nr_class)()
        for xi in x:
            xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
            label = libsvm.svm_predict_probability(m, xi, prob_estimates)
            values = prob_estimates[:nr_class]
            pred_labels += [label]
            pred_values += [values]
    else:
        if is_prob_model:
            info("Model supports probability estimates, but disabled in prediction.")

        # One-class SVM and the two regression machines produce a single
        # decision value per instance; k-class classification produces one
        # value per pairwise (k*(k-1)/2) binary classifier.
        # BUG FIX: this tuple previously listed NU_SVC (a classifier)
        # instead of NU_SVR, so nu-SVC models got a 1-element buffer while
        # libsvm writes k*(k-1)/2 decision values into it (buffer overrun).
        if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVR):
            nr_classifier = 1
        else:
            nr_classifier = nr_class*(nr_class-1)//2
        dec_values = (c_double * nr_classifier)()
        for xi in x:
            xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
            label = libsvm.svm_predict_values(m, xi, dec_values)
            if(nr_class == 1):
                values = [1]
            else:
                values = dec_values[:nr_classifier]
            pred_labels += [label]
            pred_values += [values]

    ACC, MSE, SCC = evaluations(y, pred_labels)
    l = len(y)
    if svm_type in [EPSILON_SVR, NU_SVR]:
        info("Mean squared error = %g (regression)" % MSE)
        info("Squared correlation coefficient = %g (regression)" % SCC)
    else:
        info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))
    return pred_labels, (ACC, MSE, SCC), pred_values
|
|
import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
#from sg_filter import savitzky_golay
from scipy.signal import savgol_filter
import scipy.fftpack
def read_JP_files(fname):
    """Load a six-column, space-delimited JP output file and return the
    columns (phase, N2kev, N6kev, N12kev, Nbol, Fbol) as arrays."""
    data = np.genfromtxt(fname, delimiter=" ")
    return tuple(data[:, col] for col in range(6))
def read_JN_files(fname):
    """Load a six-column, comma-delimited JN output file and return the
    columns (phase, N2kev, N6kev, N12kev, Nbol, Fbol) as arrays."""
    data = np.genfromtxt(fname, delimiter=",")
    return tuple(data[:, col] for col in range(6))
def read_PP_files(fname):
    """Load a two-column, space-delimited PP file; return (phase, flux)."""
    data = np.genfromtxt(fname, delimiter=" ")
    phase_col, flux_col = data[:, 0], data[:, 1]
    return phase_col, flux_col
## Plot
# Figure-wide styling for the 4-column pulse-profile comparison grid.
fig = figure(figsize=(9,10), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')
# A 400-row virtual grid; each model row uses a tall profile panel plus a
# short error panel, then a gap (see panelh/epanelh/skiph below).
gs = GridSpec(400, 4)
gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)
lsize = 7.0
# Shared x-range (pulse phase) for every panel.
xmin = -0.04
xmax = 1.04
# y-range of the relative-difference panels, in per cent.
#eymin = -1.0
#eymax = 1.0
eymin = -0.5
eymax = 0.5
# Panel heights and inter-row gap, in GridSpec rows.
panelh = 45
epanelh = 25
skiph = 30
# Running top row of the current panel pair (advanced per model row).
mfiglim = 0
# Directory holding the simulation output files.
path_JP = "../../out/"
#labels
tsize = 10.0
# Spin frequency (Hz) used both to pick input files and to label the rows.
#nu = '1'
nu = '400'
fig.text(0.5, 0.92, '$\\nu = '+nu+'$ Hz blackbody $\\rho = 1^{\circ}$', ha='center', va='center', size=tsize)
fig.text(0.5, 0.72, '$\\nu = '+nu+'$ Hz Hopf $\\rho = 1^{\circ}$', ha='center', va='center', size=tsize)
fig.text(0.5, 0.52, '$\\nu = '+nu+'$ Hz blackbody $\\rho = 30^{\circ}$', ha='center', va='center', size=tsize)
fig.text(0.5, 0.32, '$\\nu = '+nu+'$ Hz Hopf $\\rho = 30^{\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.12, 'Phase',ha='center', va='center', size=lsize)
# Main loop: one row group (j) per physical model/geometry, four columns (i)
# per group.  Each cell is a pulse-profile panel (ax1) with a
# relative-difference panel (ax2) directly below it.
for j in range(4):
    if j == 0:
        fname = path_JP + 'iso_sph1.txt'
        #fname = path_JP + 'nu'+nu+'Hz_blackbody_rho1deg.dat'
        fname2 = path_JP + 'f'+nu+'pbbr12m1.6d50i60x1.csv'
        fname3 = path_JP + 'popiha/flux_'+nu+'hz_1deg.dat'
    if j == 1:
        fname = path_JP + 'nu'+nu+'Hz_hopf_rho1deg.dat'
        fname2 = path_JP + 'f'+nu+'phopfr12m1.6d50i60x1.csv'
    if j == 2:
        fname = path_JP + 'iso_sph30.txt'
        #fname = path_JP + 'nu'+nu+'Hz_blackbody_rho30deg.dat'
        fname2 = path_JP + 'f'+nu+'pbbr12m1.6d50i60x30.csv'
        fname3 = path_JP + 'popiha/flux_'+nu+'hz_30deg.dat'
    if j == 3:
        fname = path_JP + 'nu'+nu+'Hz_hopf_rho30deg.dat'
        fname2 = path_JP + 'f'+nu+'phopfr12m1.6d50i60x30.csv'
    #read JP data
    phase, N2kev, N6kev, N12kev, Nbol, Fbol = read_JP_files(fname)
    #read JN data
    phase2, N2kev2, N6kev2, N12kev2, Nbol2, Fbol2 = read_JN_files(fname2)
    phase2s = phase2
    # PP data only exists for the blackbody rows (j == 0, 2).
    if (j == 0) or (j == 2):
        phase3, flux3 = read_PP_files(fname3)
    for i in range(4):
        #frame for the main pulse profile fig
        ax1 = subplot(gs[mfiglim:mfiglim+panelh, i])
        ax1.minorticks_on()
        #ax1.set_xticklabels([])
        ax1.set_xlim(xmin, xmax)
        #ax1.yaxis.major.formatter.set_powerlimits((0,0))
        # Force scientific (10^n) tick labels so the panels stay narrow.
        formatter = ScalarFormatter(useMathText=True)
        formatter.set_scientific(True)
        formatter.set_powerlimits((0,0))
        ax1.yaxis.set_major_formatter(formatter)
        #not working solutions
        #ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
        #xmft = ScalarFormatter()
        #xmft.set_powerlimits((-2,2))
        #ax1.xaxis.set_major_formatter(xmft)
        # Pick the column's quantity: photon flux at 2/6/12 keV or the
        # bolometric energy flux for the last column.
        if i == 0:
            ax1.set_ylabel('$N$ (2 keV)\n[ph cm$^{-2}$ s$^{-1}$ keV$^{-1}$]',size=lsize)
            flux = N2kev
            flux2 = N2kev2
        elif i == 1:
            ax1.set_ylabel('$N$ (6 keV)',size=lsize)
            flux = N6kev
            flux2 = N6kev2
        elif i == 2:
            ax1.set_ylabel('$N$ (12 keV)',size=lsize)
            flux = N12kev
            flux2 = N12kev2
        elif i == 3:
            ax1.set_ylabel('Bolometric [ph cm$^{-2}$ s$^{-1}$]',size=lsize)
            #flux = Nbol
            #flux2 = Nbol2
            flux = Fbol
            flux2 = Fbol2
        #Savitzky-Golay low-pass filtter
        # NOTE(review): this rebinds flux3, clobbering the PP data read
        # above for j == 0 / j == 2 -- presumably intentional since the PP
        # plotting below is commented out, but confirm.
        flux2[-1] = flux2[0]
        #flux3 = savgol_filter(flux2, 15, 5, mode='wrap')
        flux3 = savgol_filter(flux2, 15, 5, mode='wrap')
        # Keep the raw samples near the phase ends so the wrap padding of
        # the filter does not distort the boundaries.
        flux3[0:9] = flux2[0:9]
        flux3[-9:-1] = flux2[-9:-1]
        flux3[-1] = flux2[0]
        #flux3 = flux2
        #FFT filtterint
        #w = scipy.fftpack.rfft(flux2)
        #f = scipy.fftpack.rfftfreq(len(flux2), phase2[1]-phase2[0])
        #spectrum = w**2
        #
        #cutoff_idx = spectrum < (spectrum.max()/1.0e6)
        #print spectrum
        #w2 = w.copy()
        #w2[cutoff_idx] = 0
        #flux3 = scipy.fftpack.irfft(w2)
        #ax1.plot(phase2, flux3, "g--")
        #if (j == 1) or (j == 3):
        #    phase2 = phase2s - 0.00005
        #JP data
        ax1.plot(phase, flux, 'k-')
        #JN data
        ax1.plot(phase2, flux2, 'r--')
        #PP data
        if i == 3:
            if (j == 0) or (j == 2):
                # Shift PP phases to align with the JP/JN phase convention.
                phase3 = phase3 - 0.0007
                #ax1.plot(phase3, flux3, 'b--', linewidth=0.4)
        # Drop the outermost ticks so adjacent panels do not collide.
        ax1.set_yticks(ax1.get_yticks()[1:-1])
        #frame for the error panel
        ax2 = subplot(gs[(mfiglim+panelh):(mfiglim+panelh+epanelh), i])
        ax2.minorticks_on()
        ax2.set_xlim(xmin, xmax)
        ax2.set_ylim(eymin, eymax)
        #ax2.set_yticks(ax2.get_yticks()[1:-1])
        if i == 0:
            ax2.set_ylabel('$\Delta$ %',size=lsize)
        #if j != 3:
        #    ax2.set_xticklabels([])
        if j == 3:
            ax2.set_xlabel('Phase', size=lsize)
        # Zero-difference reference line.
        ax2.plot([xmin, xmax], [0.0, 0.0], 'r--', linewidth=0.3)
        #interpolate error from JN
        #fluxi2 = griddata(phase2, flux2, (phase), method='cubic')
        #fluxi2 = griddata(phase2, flux2, (phase), method='linear')
        #err = (flux/fluxi2 - 1)*100
        #ax2.plot(phase, err, 'k-', linewidth = 0.4)
        #interpolate error from JP
        #fluxi = griddata(phase, flux, (phase2), method='linear')
        # Resample the JP curve onto the JN phase grid, then plot the
        # percentage difference relative to the smoothed JN curve.
        fluxi = griddata(phase, flux, (phase2), method='cubic')
        err = (fluxi/flux2 - 1)*100
        #ax2.plot(phase2, err, 'k-', linewidth = 0.4)
        err = (fluxi/flux3 - 1)*100
        ax2.plot(phase2, err, 'k-', linewidth = 0.4)
        #if (j == 0) or (j == 2):
        #    fluxi3 = griddata(phase3, flux3, (phase), method='linear')
        #    err3 = (flux/fluxi3 - 1)*100
        #    #ax2.plot(phase, err3, 'b-', linewidth = 0.4)
        #for pshift in np.linspace(-0.0005, 0.0005, 10):
        #    fluxi = griddata(phase+pshift, flux, (phase2), method='cubic')
        #    err = (fluxi/flux2 - 1)*100
        #    ax2.plot(phase2, err, 'b-', linewidth = 0.4)
    # Advance past this row group (profile panel + error panel + gap).
    mfiglim += panelh+epanelh+skiph
#savefig('fig2a.pdf', bbox_inches='tight')
# Write the assembled comparison figure to disk.
savefig('comparison.pdf', bbox_inches='tight')
|
|
"""Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident, ntob, set, Timer, SetDaemonProperty
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
    """Plugin base class which auto-subscribes methods for known channels."""

    bus = None
    """A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine."""

    def __init__(self, bus):
        self.bus = bus

    def _channel_methods(self):
        # Yield (channel, bound method) pairs for every bus channel this
        # plugin implements as a method (start, stop, exit, ...).
        for channel in self.bus.listeners:
            handler = getattr(self, channel, None)
            if handler is not None:
                yield channel, handler

    def subscribe(self):
        """Register this object as a (multi-channel) listener on the bus."""
        for channel, handler in self._channel_methods():
            self.bus.subscribe(channel, handler)

    def unsubscribe(self):
        """Unregister this object as a listener on the bus."""
        for channel, handler in self._channel_methods():
            self.bus.unsubscribe(channel, handler)
class SignalHandler(object):
    """Register bus channels (and listeners) for system signals.

    You can modify what signals your application listens for, and what it does
    when it receives signals, by modifying :attr:`SignalHandler.handlers`,
    a dict of {signal name: callback} pairs. The default set is::

        handlers = {'SIGTERM': self.bus.exit,
                    'SIGHUP': self.handle_SIGHUP,
                    'SIGUSR1': self.bus.graceful,
                   }

    The :func:`SignalHandler.handle_SIGHUP` method calls
    :func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
    if the process is daemonized, but
    :func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
    if the process is attached to a TTY. This is because Unix window
    managers tend to send SIGHUP to terminal windows when the user closes them.

    Feel free to add signals which are not available on every platform. The
    :class:`SignalHandler` will ignore errors raised from attempting to register
    handlers for unknown signals.
    """

    handlers = {}
    """A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""

    signals = {}
    """A map from signal numbers to names."""

    # Build the number -> name lookup once at class-definition time.  The
    # SIG_* constants (SIG_DFL, SIG_IGN, ...) are handler values, not signal
    # numbers, so they are excluded.
    for k, v in vars(_signal).items():
        if k.startswith('SIG') and not k.startswith('SIG_'):
            signals[v] = k
    del k, v

    def __init__(self, bus):
        self.bus = bus
        # Set default handlers
        self.handlers = {'SIGTERM': self.bus.exit,
                         'SIGHUP': self.handle_SIGHUP,
                         'SIGUSR1': self.bus.graceful,
                        }

        if sys.platform[:4] == 'java':
            # Jython: SIGUSR1 is unavailable; see _jython_SIGINT_handler too.
            del self.handlers['SIGUSR1']
            self.handlers['SIGUSR2'] = self.bus.graceful
            self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
                         "Using SIGUSR2 instead.")
            self.handlers['SIGINT'] = self._jython_SIGINT_handler

        # signum -> handler that was installed before ours, so that
        # unsubscribe() can restore it.
        self._previous_handlers = {}

    def _jython_SIGINT_handler(self, signum=None, frame=None):
        # See http://bugs.jython.org/issue1313
        self.bus.log('Keyboard Interrupt: shutting down bus')
        self.bus.exit()

    def subscribe(self):
        """Subscribe self.handlers to signals."""
        for sig, func in self.handlers.items():
            try:
                self.set_handler(sig, func)
            except ValueError:
                # Signal not available on this platform; skip it.
                pass

    def unsubscribe(self):
        """Unsubscribe self.handlers from signals."""
        for signum, handler in self._previous_handlers.items():
            signame = self.signals[signum]
            if handler is None:
                self.bus.log("Restoring %s handler to SIG_DFL." % signame)
                handler = _signal.SIG_DFL
            else:
                self.bus.log("Restoring %s handler %r." % (signame, handler))
            try:
                our_handler = _signal.signal(signum, handler)
                if our_handler is None:
                    # Someone else replaced our handler in the meantime.
                    self.bus.log("Restored old %s handler %r, but our "
                                 "handler was not registered." %
                                 (signame, handler), level=30)
            except ValueError:
                self.bus.log("Unable to restore %s handler %r." %
                             (signame, handler), level=40, traceback=True)

    def set_handler(self, signal, listener=None):
        """Subscribe a handler for the given signal (number or name).

        If the optional 'listener' argument is provided, it will be
        subscribed as a listener for the given signal's channel.

        If the given signal name or number is not available on the current
        platform, ValueError is raised.
        """
        if isinstance(signal, basestring):
            signum = getattr(_signal, signal, None)
            if signum is None:
                raise ValueError("No such signal: %r" % signal)
            signame = signal
        else:
            try:
                signame = self.signals[signal]
            except KeyError:
                raise ValueError("No such signal: %r" % signal)
            signum = signal
        # Remember the displaced handler so unsubscribe() can restore it.
        prev = _signal.signal(signum, self._handle_signal)
        self._previous_handlers[signum] = prev
        if listener is not None:
            self.bus.log("Listening for %s." % signame)
            self.bus.subscribe(signame, listener)

    def _handle_signal(self, signum=None, frame=None):
        """Python signal handler (self.set_handler subscribes it for you)."""
        signame = self.signals[signum]
        self.bus.log("Caught signal %s." % signame)
        # Forward the signal to bus listeners on the channel of the same name.
        self.bus.publish(signame)

    def handle_SIGHUP(self):
        """Restart if daemonized, else exit."""
        if os.isatty(sys.stdin.fileno()):
            # not daemonized (may be foreground or background)
            self.bus.log("SIGHUP caught but not daemonized. Exiting.")
            self.bus.exit()
        else:
            self.bus.log("SIGHUP caught while daemonized. Restarting.")
            self.bus.restart()
# pwd/grp are POSIX-only.  On platforms without them (e.g. Windows),
# DropPrivileges degrades gracefully by ignoring uid/gid.
try:
    import pwd, grp
except ImportError:
    pwd, grp = None, None
class DropPrivileges(SimplePlugin):
    """Drop privileges. uid/gid arguments not available on Windows.

    Special thanks to Gavin Baker: http://antonym.org/node/100.
    """

    def __init__(self, bus, umask=None, uid=None, gid=None):
        SimplePlugin.__init__(self, bus)
        self.finalized = False
        self.uid = uid
        self.gid = gid
        self.umask = umask

    def _get_uid(self):
        return self._uid
    def _set_uid(self, val):
        if val is not None:
            if pwd is None:
                self.bus.log("pwd module not available; ignoring uid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Accept a user name and translate it to a numeric uid.
                val = pwd.getpwnam(val)[2]
        self._uid = val
    uid = property(_get_uid, _set_uid,
        doc="The uid under which to run. Availability: Unix.")

    def _get_gid(self):
        return self._gid
    def _set_gid(self, val):
        if val is not None:
            if grp is None:
                self.bus.log("grp module not available; ignoring gid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Accept a group name and translate it to a numeric gid.
                val = grp.getgrnam(val)[2]
        self._gid = val
    gid = property(_get_gid, _set_gid,
        doc="The gid under which to run. Availability: Unix.")

    def _get_umask(self):
        return self._umask
    def _set_umask(self, val):
        if val is not None:
            try:
                os.umask
            except AttributeError:
                self.bus.log("umask function not available; ignoring umask.",
                             level=30)
                val = None
        self._umask = val
    umask = property(_get_umask, _set_umask,
        doc="""The default permission mode for newly created files and directories.

        Usually expressed in octal format, for example, ``0644``.
        Availability: Unix, Windows.
        """)

    def start(self):
        # uid/gid
        def current_ids():
            """Return the current (uid, gid) if available."""
            name, group = None, None
            if pwd:
                name = pwd.getpwuid(os.getuid())[0]
            if grp:
                group = grp.getgrgid(os.getgid())[0]
            return name, group

        if self.finalized:
            if not (self.uid is None and self.gid is None):
                self.bus.log('Already running as uid: %r gid: %r' %
                             current_ids())
        else:
            if self.uid is None and self.gid is None:
                if pwd or grp:
                    self.bus.log('uid/gid not set', level=30)
            else:
                self.bus.log('Started as uid: %r gid: %r' % current_ids())
                # Drop the group first: after the uid is dropped we may no
                # longer have permission to change groups.
                if self.gid is not None:
                    os.setgid(self.gid)
                    os.setgroups([])
                if self.uid is not None:
                    os.setuid(self.uid)
                self.bus.log('Running as uid: %r gid: %r' % current_ids())

        # umask
        if self.finalized:
            if self.umask is not None:
                self.bus.log('umask already set to: %03o' % self.umask)
        else:
            if self.umask is None:
                self.bus.log('umask not set', level=30)
            else:
                old_umask = os.umask(self.umask)
                self.bus.log('umask old: %03o, new: %03o' %
                             (old_umask, self.umask))

        self.finalized = True

    # This is slightly higher than the priority for server.start
    # in order to facilitate the most common use: starting on a low
    # port (which requires root) and then dropping to another user.
    start.priority = 77
class Daemonizer(SimplePlugin):
    """Daemonize the running script.

    Use this with a Web Site Process Bus via::

        Daemonizer(bus).subscribe()

    When this component finishes, the process is completely decoupled from
    the parent environment. Please note that when this component is used,
    the return code from the parent process will still be 0 if a startup
    error occurs in the forked children. Errors in the initial daemonizing
    process still return proper exit codes. Therefore, if you use this
    plugin to daemonize, don't use the return code as an accurate indicator
    of whether the process fully started. In fact, that return code only
    indicates if the process succesfully finished the first fork.
    """

    def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null'):
        SimplePlugin.__init__(self, bus)
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.finalized = False

    def start(self):
        if self.finalized:
            # NOTE(review): there is no early return here, so a second
            # 'start' logs this message and then daemonizes again --
            # confirm whether that is intended.
            self.bus.log('Already deamonized.')

        # forking has issues with threads:
        # http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
        # "The general problem with making fork() work in a multi-threaded
        # world is what to do with all of the threads..."
        # So we check for active threads:
        if threading.activeCount() != 1:
            self.bus.log('There are %r active threads. '
                         'Daemonizing now may cause strange failures.' %
                         threading.enumerate(), level=30)

        # See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        # (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
        # and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012

        # Finish up with the current stdout/stderr
        sys.stdout.flush()
        sys.stderr.flush()

        # Do first fork.
        try:
            pid = os.fork()
            if pid == 0:
                # This is the child process. Continue.
                pass
            else:
                # This is the first parent. Exit, now that we've forked.
                self.bus.log('Forking once.')
                os._exit(0)
        except OSError:
            # Python raises OSError rather than returning negative numbers.
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #1 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))

        # Detach from the controlling terminal by becoming session leader.
        os.setsid()

        # Do second fork
        try:
            pid = os.fork()
            if pid > 0:
                self.bus.log('Forking twice.')
                os._exit(0) # Exit second parent
        except OSError:
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #2 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))

        os.chdir("/")
        os.umask(0)

        # Redirect the standard streams to the configured files.
        si = open(self.stdin, "r")
        so = open(self.stdout, "a+")
        se = open(self.stderr, "a+")

        # os.dup2(fd, fd2) will close fd2 if necessary,
        # so we don't explicitly close stdin/out/err.
        # See http://docs.python.org/lib/os-fd-ops.html
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        self.bus.log('Daemonized to PID: %s' % os.getpid())
        self.finalized = True
    start.priority = 65
class PIDFile(SimplePlugin):
    """Maintain a PID file via a WSPBus."""

    def __init__(self, bus, pidfile):
        SimplePlugin.__init__(self, bus)
        self.pidfile = pidfile
        self.finalized = False

    def start(self):
        """Write the current process id to self.pidfile (once per process)."""
        pid = os.getpid()
        if self.finalized:
            self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
        else:
            # Use a context manager so the handle is closed deterministically
            # (the original relied on GC to close the file).
            with open(self.pidfile, "wb") as f:
                f.write(ntob("%s" % pid, 'utf8'))
            self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
            self.finalized = True
    start.priority = 70

    def exit(self):
        """Remove the PID file (best-effort)."""
        try:
            os.remove(self.pidfile)
            self.bus.log('PID file removed: %r.' % self.pidfile)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Best-effort cleanup: the file may never have been written,
            # or may already be gone.  (Was a bare `except:`.)
            pass
class PerpetualTimer(Timer):
    """A responsive subclass of threading.Timer whose run() method repeats.

    Use this timer only when you really need a very interruptible timer;
    this checks its 'finished' condition up to 20 times a second, which can
    result in pretty high CPU usage.
    """

    def __init__(self, *args, **kwargs):
        "Override parent constructor to allow 'bus' to be provided."
        self.bus = kwargs.pop('bus', None)
        super(PerpetualTimer, self).__init__(*args, **kwargs)

    def run(self):
        while True:
            # wait() returns early when cancel() sets the 'finished' event,
            # which is what makes this timer interruptible.
            self.finished.wait(self.interval)
            if self.finished.isSet():
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log(
                        "Error in perpetual timer thread function %r." %
                        self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise
class BackgroundTask(SetDaemonProperty, threading.Thread):
    """A subclass of threading.Thread whose run() method repeats.

    Use this class for most repeating tasks. It uses time.sleep() to wait
    for each interval, which isn't very responsive; that is, even if you call
    self.cancel(), you'll have to wait until the sleep() call finishes before
    the thread stops. To compensate, it defaults to being daemonic, which means
    it won't delay stopping the whole process.
    """

    def __init__(self, interval, function, args=None, kwargs=None, bus=None):
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        # BUG FIX: the defaults used to be the mutable literals [] and {},
        # which are evaluated once and shared between every BackgroundTask
        # created without explicit args/kwargs.
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.running = False
        self.bus = bus

        # default to daemonic
        self.daemon = True

    def cancel(self):
        """Ask the task loop to stop before its next iteration."""
        self.running = False

    def run(self):
        """Sleep, invoke the callback, repeat — until cancel() or an error."""
        self.running = True
        while self.running:
            time.sleep(self.interval)
            if not self.running:
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log("Error in background task thread function %r."
                                 % self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise
class Monitor(SimplePlugin):
    """WSPBus listener to periodically run a callback in its own thread."""

    callback = None
    """The function to call at intervals."""

    frequency = 60
    """The time in seconds between callback runs."""

    thread = None
    """A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>` thread."""

    def __init__(self, bus, callback, frequency=60, name=None):
        SimplePlugin.__init__(self, bus)
        self.callback = callback
        self.frequency = frequency
        self.thread = None
        self.name = name

    def start(self):
        """Start our callback in its own background thread."""
        if self.frequency > 0:
            threadname = self.name or self.__class__.__name__
            if self.thread is None:
                self.thread = BackgroundTask(self.frequency, self.callback,
                                             bus = self.bus)
                self.thread.setName(threadname)
                self.thread.start()
                self.bus.log("Started monitor thread %r." % threadname)
            else:
                self.bus.log("Monitor thread %r already started." % threadname)
    start.priority = 70

    def stop(self):
        """Stop our callback's background task thread."""
        if self.thread is None:
            # BUG FIX: '%' binds tighter than 'or', so the original
            # ("..." % self.name or self.__class__.__name__) never used the
            # class-name fallback and logged "... for None." when no name
            # was set.
            self.bus.log("No thread running for %s." %
                         (self.name or self.__class__.__name__))
        else:
            if self.thread is not threading.currentThread():
                name = self.thread.getName()
                self.thread.cancel()
                if not get_daemon(self.thread):
                    self.bus.log("Joining %r" % name)
                    self.thread.join()
                self.bus.log("Stopped thread %r." % name)
            self.thread = None

    def graceful(self):
        """Stop the callback's background task thread and restart it."""
        self.stop()
        self.start()
class Autoreloader(Monitor):
    """Monitor which re-executes the process when files change.

    This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
    if any of the files it monitors change (or is deleted). By default, the
    autoreloader monitors all imported modules; you can add to the
    set by adding to ``autoreload.files``::

        cherrypy.engine.autoreload.files.add(myFile)

    If there are imported files you do *not* wish to monitor, you can adjust the
    ``match`` attribute, a regular expression. For example, to stop monitoring
    cherrypy itself::

        cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'

    Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
    the autoreload plugin takes a ``frequency`` argument. The default is
    1 second; that is, the autoreloader will examine files once each second.
    """

    files = None
    """The set of files to poll for modifications."""

    frequency = 1
    """The interval in seconds at which to poll for modified files."""

    match = '.*'
    """A regular expression by which to match filenames."""

    def __init__(self, bus, frequency=1, match='.*'):
        # filename -> last seen mtime (None marks "no .py source file").
        self.mtimes = {}
        self.files = set()
        self.match = match
        Monitor.__init__(self, bus, self.run, frequency)

    def start(self):
        """Start our own background task thread for self.run."""
        if self.thread is None:
            # Re-baseline every file's mtime on a fresh start.
            self.mtimes = {}
        Monitor.start(self)
    start.priority = 70

    def sysfiles(self):
        """Return a Set of sys.modules filenames to monitor."""
        files = set()
        for k, m in list(sys.modules.items()):
            if re.match(self.match, k):
                # Zipimported modules expose the archive path rather than
                # a usable __file__.
                if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
                    f = m.__loader__.archive
                else:
                    f = getattr(m, '__file__', None)
                    if f is not None and not os.path.isabs(f):
                        # ensure absolute paths so a os.chdir() in the app doesn't break me
                        f = os.path.normpath(os.path.join(_module__file__base, f))
                files.add(f)
        return files

    def run(self):
        """Reload the process if registered files have been modified."""
        for filename in self.sysfiles() | self.files:
            if filename:
                if filename.endswith('.pyc'):
                    # Watch the .py source, not the compiled file.
                    filename = filename[:-1]

                oldtime = self.mtimes.get(filename, 0)
                if oldtime is None:
                    # Module with no .py file. Skip it.
                    continue

                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # Either a module with no .py file, or it's been deleted.
                    mtime = None

                if filename not in self.mtimes:
                    # If a module has no .py file, this will be None.
                    self.mtimes[filename] = mtime
                else:
                    if mtime is None or mtime > oldtime:
                        # The file has been deleted or modified.
                        self.bus.log("Restarting because %s changed." % filename)
                        self.thread.cancel()
                        self.bus.log("Stopped thread %r." % self.thread.getName())
                        self.bus.restart()
                        return
class ThreadManager(SimplePlugin):
    """Manager for HTTP request threads.

    If you have control over thread creation and destruction, publish to
    the 'acquire_thread' and 'release_thread' channels (for each thread).
    This will register/unregister the current thread and publish to
    'start_thread' and 'stop_thread' listeners in the bus as needed.

    If threads are created and destroyed by code you do not control
    (e.g., Apache), then, at the beginning of every HTTP request,
    publish to 'acquire_thread' only. You should not publish to
    'release_thread' in this case, since you do not know whether
    the thread will be re-used or not. The bus will call
    'stop_thread' listeners for you when it stops.
    """

    threads = None
    """A map of {thread ident: index number} pairs."""

    def __init__(self, bus):
        self.threads = {}
        SimplePlugin.__init__(self, bus)
        # Make sure all four channels exist even before anyone subscribes.
        for channel in ('acquire_thread', 'start_thread',
                        'release_thread', 'stop_thread'):
            self.bus.listeners.setdefault(channel, set())

    def acquire_thread(self):
        """Run 'start_thread' listeners for the current thread.

        If the current thread has already been seen, any 'start_thread'
        listeners will not be run again.
        """
        ident = get_thread_ident()
        if ident not in self.threads:
            # Platforms may recycle raw thread IDs, so hand out our own
            # small sequential index instead of using the ident directly.
            index = len(self.threads) + 1
            self.threads[ident] = index
            self.bus.publish('start_thread', index)

    def release_thread(self):
        """Release the current thread and run 'stop_thread' listeners."""
        index = self.threads.pop(get_thread_ident(), None)
        if index is not None:
            self.bus.publish('stop_thread', index)

    def stop(self):
        """Release all threads and run all 'stop_thread' listeners."""
        for _ident, index in self.threads.items():
            self.bus.publish('stop_thread', index)
        self.threads.clear()
    graceful = stop
graceful = stop
|
|
import sys, os, math
import Image
from collections import namedtuple
from clint.textui import progress
import xml.etree.ElementTree as ET
# Axis-aligned rectangle: top-left corner (x, y) plus width/height in pixels.
BBox = namedtuple("Bbox", 'x y w h')
# Result of atlasing one output texture: the source images it contains, the
# generated mip-level images, and a {source name: BBox} placement map.
AtlasData = namedtuple("Atlas", "src_images, mips, bboxes")
def NextMultipleOf(n, target):
    """Round n up to the nearest multiple of target (n itself if it already is one)."""
    remainder = n % target
    return n if remainder == 0 else n + target - remainder
def GetDim(ims, alignto, maxtexturesize=2048, scale_factor=1):
    """Pick a power-of-two atlas edge length for the given images.

    :param ims: images with a PIL-style .size (width, height) attribute.
    :param alignto: alignment granularity for the largest image edge.
    :param maxtexturesize: hard cap on the atlas edge length, in pixels.
    :param scale_factor: scale applied to image dimensions when sizing.
    :returns: chosen edge length divided back by scale_factor.
    """
    # Total scaled pixel area of all images.  (The original used the
    # Python-2-only builtin `reduce`; `sum` is equivalent and portable.)
    area = sum(im.size[0] * im.size[1] * scale_factor * scale_factor for im in ims)
    # Largest scaled edge of any single image, rounded up to the alignment.
    maxdim = NextMultipleOf(max(max(im.size[0], im.size[1]) * scale_factor for im in ims), alignto)
    # Smallest power of two covering both the padded area (25% slack) and
    # the largest image edge, capped at the maximum texture size.
    dim = min(maxtexturesize,
              max(math.pow(2, math.ceil(math.log(math.sqrt(area * 1.25), 2))),
                  math.pow(2, math.ceil(math.log(maxdim, 2)))))
    #print("GETDIM:", area, maxdim, dim)
    return dim / scale_factor
def BBoxIntersects(bb1, bb2):
    """Return True when the two axis-aligned boxes overlap.

    Boxes that merely touch along an edge do not count as intersecting.
    """
    overlaps_x = bb2.x < bb1.x + bb1.w and bb1.x < bb2.x + bb2.w
    overlaps_y = bb2.y < bb1.y + bb1.h and bb1.y < bb2.y + bb2.h
    return overlaps_x and overlaps_y
def TryInsertImage(w, h, fblist, atlassize):
    """Find a free w*h spot in an atlas already holding the boxes in `fblist`.

    Scans left-to-right, top-to-bottom on a 4-pixel grid, jumping past any
    occupied FullBox that intersects the candidate position.

    :param w: width of the image to place, in pixels.
    :param h: height of the image to place, in pixels.
    :param fblist: FullBox entries already placed in this atlas.
    :param atlassize: edge length of the (square) atlas.
    :returns: the placement BBox, or None when the image does not fit.
    """
    align = 4
    occupied = list(fblist)
    x = 0
    y = 0
    while y + h < atlassize:
        min_y = None
        # Only boxes overlapping this horizontal band can block the row scan.
        ytestbb = BBox(0, y, atlassize, h)
        templist = [fb for fb in occupied if BBoxIntersects(fb.bbox, ytestbb)]
        while x + w <= atlassize:
            testbb = BBox(x, y, w, h)
            intersects = False
            for fb in templist:
                if BBoxIntersects(fb.bbox, testbb):
                    # Jump past the blocking box; remember the lowest bottom
                    # edge seen so the next row can start below it.
                    x = NextMultipleOf(fb.bbox.x + fb.bbox.w, align)
                    if not min_y:
                        min_y = fb.bbox.h + fb.bbox.y
                    else:
                        min_y = min(min_y, fb.bbox.h + fb.bbox.y)
                    intersects = True
                    break
            if not intersects:
                return BBox(x, y, w, h)
        # NOTE(review): `if not min_y` also treats a bottom edge of 0 as
        # "unset"; harmless here because a blocking box always ends below 0.
        if min_y:
            y = max(NextMultipleOf(min_y, align), y + align)
        else:
            y += align
        x = 0
    return None
def Clamp(lower, upper, val):
    """Restrict val to the interval [lower, upper]."""
    capped = min(upper, val)
    return max(lower, capped)
def GenerateXMLTree(texture_filename, texture_size, bboxes, offset_amount=None):
    """Build the <Atlas> XML description for one packed texture.

    :param texture_filename: path of the texture; only the basename is stored.
    :param texture_size: (width, height) of the texture in pixels.
    :param bboxes: {element name: BBox} placements in pixel coordinates.
    :param offset_amount: UV inset in pixels; defaults to half a pixel to
        avoid edge-sampling issues.  Pass 0 to disable the inset.
    :returns: an xml.etree.ElementTree.ElementTree rooted at <Atlas>.
    """
    root = ET.Element("Atlas")
    tex_elem = ET.SubElement(root, "Texture")
    tex_elem.set("filename", os.path.basename(texture_filename))
    elem_root = ET.SubElement(root, "Elements")
    # pull in the UVs by a half pixel from the edge to avoid some sampling
    # issues, unless told otherwise.  (Compare against None with `is not`,
    # per PEP 8 -- `!= None` would also misbehave for odd __eq__ overloads.)
    offset_amount_x = offset_amount if offset_amount is not None else 0.5
    offset_amount_y = offset_amount if offset_amount is not None else 0.5
    border_uv_offset = (offset_amount_x / texture_size[0], offset_amount_y / texture_size[1])
    for filename, bbox in bboxes.items():
        elem = ET.SubElement(elem_root, "Element")
        elem.set("name", filename)
        # The V axis is flipped: bboxes are top-left based, UVs bottom-left.
        u1 = Clamp(0.0, 1.0, bbox.x / float(texture_size[0]) + border_uv_offset[0])
        v1 = Clamp(0.0, 1.0, 1.0 - (bbox.y + bbox.h) / float(texture_size[1]) + border_uv_offset[1])
        u2 = Clamp(0.0, 1.0, (bbox.x + bbox.w) / float(texture_size[0]) - border_uv_offset[0])
        v2 = Clamp(0.0, 1.0, 1.0 - bbox.y / float(texture_size[1]) - border_uv_offset[1])
        elem.set("u1", str(u1))
        elem.set("v1", str(v1))
        elem.set("u2", str(u2))
        elem.set("v2", str(v2))
    tree = ET.ElementTree(root)
    return tree
# One output atlas image: its generated name plus the PIL Image object.
OutImage = namedtuple("OutImage", "name im")
# A placed source image: the PIL image, the index of the atlas it landed in,
# its placement BBox, and its source name.
FullBox = namedtuple("FullBox", "im, outidx, bbox, name")
#ims: list of subimages
#outname: prefix name for output images
#returns atlases
#atlases = {index : AtlasData(src_images, mips, bboxes)}, one entry per
#generated output texture
def Atlas(ims, outname, max_size=2048, scale_factor=1, ignore_exceptions=False, minimize_num_textures=True, force_square=False ):
    """Pack the source images into one or more atlas textures with mip chains.

    :param ims: source images; each needs PIL-style .size and a .name.
    :param outname: prefix used to name the generated atlas images.
    :param max_size: maximum atlas edge length in pixels.
    :param scale_factor: scale applied when sizing atlases (see GetDim).
    :param ignore_exceptions: despite the name, currently only selects a
        plain loop instead of the clint progress-bar wrapper.
    :param minimize_num_textures: accepted but never read -- TODO confirm.
    :param force_square: keep output textures square instead of halving
        an underused dimension.
    :returns: {atlas index: AtlasData(src_images, mips, bboxes)}.

    NOTE(review): written for Python 2 -- relies on integer division
    (e.g. `b_w / divisor`) producing ints; under Python 3 these become
    floats and Image.resize/paste would receive non-integral arguments.
    """
    blocksize = 4
    dim = GetDim(ims, blocksize, max_size, scale_factor)
    size = (dim,dim)
    #sort by image area
    ims = sorted(ims, key = lambda im : im.size[1]*im.size[0], reverse=True)
    #Full boxes are areas where we have placed images in the atlas
    # fullboxes maps atlas index -> ((width, height), [FullBox, ...])
    fullboxes = {0 :(size,[])}
    #Do the actual atlasing by sticking the largest images we can have into the smallest valid free boxes
    source_idx = 0
    def LocalAtlas( im, source_idx ):
        # Place one image into the first atlas that has room; open a new
        # atlas sized for the remaining images when none does.
        if im.size[0] > size[0] or im.size[1] > size[1]:
            sys.stderr.write( "Error: image " + im.name + " is larger than the atlas size!\n" )
            sys.exit(2)
        inserted = False
        for idx, fb in fullboxes.items():
            # NOTE(review): always tests against the first atlas's `dim`,
            # even for overflow atlases created with `newsize` below --
            # looks wrong whenever newsize != dim; confirm.
            atlassize = dim
            fblist = fb[1]
            insertbbox = TryInsertImage(im.size[0], im.size[1], fblist, atlassize)
            if insertbbox:
                inserted = True
                fblist.append( FullBox( im, idx, insertbbox, im.name ) )
                break
        if not inserted:
            numoutimages = len(fullboxes)
            newsize = GetDim(ims[source_idx:], blocksize, max_size, scale_factor)
            fullboxes[numoutimages] = ((newsize,newsize), [FullBox(im, numoutimages, BBox(0,0,im.size[0], im.size[1]), im.name )])
        return source_idx + 1
    if ignore_exceptions:
        for im in ims:
            source_idx = LocalAtlas( im, source_idx )
    else:
        for im in progress.bar(ims, "Atlasing"):
            source_idx = LocalAtlas( im, source_idx )
    #now that we've figured out where everything goes, make the output images and blit the source images to the appropriate locations
    atlases = {}
    for idx, fb in fullboxes.items():
        w = int(fb[0][0])
        h = int(fb[0][1])
        if not force_square:
            #figure out if we can reduce our w or h:
            sz = fb[0][0]
            fblist = fb[1]
            maxy = 0
            maxx = 0
            for b in fblist:
                fbmaxy = b.bbox.y+b.bbox.h
                fbmaxx = b.bbox.x+b.bbox.w
                maxy = max(maxy, fbmaxy)
                maxx = max(maxx, fbmaxx)
            # Halve a dimension when all content fits in its lower half.
            if maxy <= h/2:
                h = int(h/2)
            if maxx <= w/2:
                w = int(w/2)
        #now generate mips and such...
        mips = []
        divisor = 1
        contained_images = {}
        # Generate mips and their positions
        # (loop condition is always true once w,h >= 1; the `break` below
        # after emitting the 1x1 level is the actual exit)
        while w >= 1 or h >= 1:
            outim = OutImage( "{0}-{1}".format(outname, idx), Image.new( "RGBA", ( w, h ) ) )
            for b in fb[1]:
                b_w, b_h = b.im.size
                b_w, b_h = b_w / divisor, b_h / divisor
                if b_w > 0 and b_h > 0:
                    resized_b = b.im.resize( ( b_w, b_h ), Image.ANTIALIAS )
                    b_x, b_y = b.bbox.x / divisor, b.bbox.y / divisor
                    outim[1].paste( resized_b, ( b_x, b_y ) )
                    contained_images[ b.im ] = True
            mips.append( outim )
            divisor = divisor << 1
            if w == 1 and h == 1:
                break
            w = max( 1, w >> 1 )
            h = max( 1, h >> 1 )
        bboxes = { b.name : b.bbox for b in fb[1] }
        atlases[idx] = AtlasData( contained_images.keys(), mips, bboxes )
    return atlases
|
|
# Copyright 2020 Catalyst Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common import utils
from trove.common.notification import EndNotification
from trove.guestagent import guest_log
from trove.guestagent.common import operating_system
from trove.guestagent.datastore import manager
from trove.guestagent.datastore.postgres import service
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PostgresManager(manager.Manager):
    """Trove guest-agent manager for the PostgreSQL datastore.

    Delegates the real work to service.PgSqlApp / PgSqlAdmin and drives the
    containerized database through self.docker_client (provided by the
    manager.Manager base class).
    """

    def __init__(self):
        super(PostgresManager, self).__init__('postgres')

        self.status = service.PgSqlAppStatus(self.docker_client)
        self.app = service.PgSqlApp(self.status, self.docker_client)
        self.adm = service.PgSqlAdmin(service.SUPER_USER_NAME)

    @property
    def configuration_manager(self):
        # Expose the app's configuration manager so base-class code can use it.
        return self.app.configuration_manager

    def _check_wal_archive_size(self, archive_path, data_path):
        """Check wal archive folder size.

        Return True if the size is greater than half of the data volume size.
        """
        archive_size = operating_system.get_dir_size(archive_path)
        data_volume_size = operating_system.get_filesystem_size(data_path)

        if archive_size > (data_volume_size / 2):
            LOG.info(f"The size({archive_size}) of wal archive folder is "
                     f"greater than half of the data volume "
                     f"size({data_volume_size})")
            return True

        return False

    def _remove_older_files(self, archive_path, files, cur_file):
        """Remove files older than cur_file.

        :param archive_path: The archive folder
        :param files: List of the ordered file names.
        :param cur_file: The compared file name.
        """
        # WAL segment names begin with a 24-hex-digit sequence that sorts
        # lexicographically in creation order, so string comparison of the
        # captured group orders the files by age.
        cur_seq = os.path.basename(cur_file).split('.')[0]
        wal_re = re.compile(r"^([0-9A-F]{24}).*")

        for wal_file in files:
            m = wal_re.search(wal_file)
            if m and m.group(1) < cur_seq:
                file_path = os.path.join(archive_path, wal_file)
                LOG.info(f"Removing wal file {file_path}")
                operating_system.remove(
                    path=file_path, force=True, recursive=False, as_root=True)

    def _remove_wals(self, archive_path, force=False):
        """Remove wal files.

        If force=True, do not consider backup.
        """
        files = os.listdir(archive_path)
        files = sorted(files, reverse=True)
        wal_files = []

        if not force:
            # Get latest backup file
            # NOTE(review): the dot before "backup" is unescaped, so this
            # pattern also matches names like "...Xbackup"; presumably
            # r"[0-9A-F]{24}.*\.backup" was intended -- confirm.
            backup_re = re.compile("[0-9A-F]{24}.*.backup")
            wal_files = [wal_file for wal_file in files
                         if backup_re.search(wal_file)]

        # If there is no backup file or force=True, remove all except the
        # latest one, otherwise, remove all the files older than the backup
        # file
        wal_files = wal_files or files
        self._remove_older_files(archive_path, files, wal_files[0])

    def _clean_wals(self, archive_path, data_path, force=False):
        """Prune the WAL archive while it exceeds half the data volume size."""
        if self._check_wal_archive_size(archive_path, data_path):
            self._remove_wals(archive_path, force)
            # check again with force=True
            # NOTE(review): recurses with force=True whenever the archive is
            # still oversized; if the single newest file alone exceeds the
            # threshold this recursion never terminates -- confirm.
            self._clean_wals(archive_path, data_path, force=True)

    @periodic_task.periodic_task(
        enabled=CONF.postgresql.enable_clean_wal_archives,
        spacing=180)
    def clean_wal_archives(self, context):
        """Clean up the wal archives to free up disk space."""
        archive_path = service.WAL_ARCHIVE_DIR
        data_path = cfg.get_configuration_property('mount_point')

        if not operating_system.exists(archive_path, is_directory=True,
                                       as_root=True):
            return

        self._clean_wals(archive_path, data_path)

    def do_prepare(self, context, packages, databases, memory_mb, users,
                   device_path, mount_point, backup_info,
                   config_contents, root_password, overrides,
                   cluster_config, snapshot, ds_version=None):
        """Prepare the instance: create directories, write config, optionally
        restore from backup or attach as a replica, then start the database.
        """
        operating_system.ensure_directory(self.app.datadir,
                                          user=CONF.database_service_uid,
                                          group=CONF.database_service_uid,
                                          as_root=True)
        operating_system.ensure_directory(service.WAL_ARCHIVE_DIR,
                                          user=CONF.database_service_uid,
                                          group=CONF.database_service_uid,
                                          as_root=True)

        LOG.info('Preparing database config files')
        self.app.configuration_manager.reset_configuration(config_contents)
        self.app.set_data_dir(self.app.datadir)
        self.app.update_overrides(overrides)

        # Prepare pg_hba.conf
        self.app.apply_access_rules()
        self.configuration_manager.apply_system_override(
            {'hba_file': service.HBA_CONFIG_FILE})

        # Restore data from backup and reset root password
        if backup_info:
            self.perform_restore(context, self.app.datadir, backup_info)
            if not snapshot:
                # Plain restore (not replica creation): drop a
                # recovery.signal file owned by the database service user.
                signal_file = f"{self.app.datadir}/recovery.signal"
                operating_system.execute_shell_cmd(
                    f"touch {signal_file}", [], shell=True, as_root=True)
                operating_system.chown(signal_file, CONF.database_service_uid,
                                       CONF.database_service_uid, force=True,
                                       as_root=True)

        if snapshot:
            # This instance is a replica
            self.attach_replica(context, snapshot, snapshot['config'])

        # config_file can only be set on the postgres command line
        command = f"postgres -c config_file={service.CONFIG_FILE}"
        self.app.start_db(ds_version=ds_version, command=command)

    def apply_overrides(self, context, overrides):
        """Reload config."""
        LOG.info("Reloading database config.")
        self.app.apply_overrides(overrides)
        LOG.info("Finished reloading database config.")

    def get_datastore_log_defs(self):
        """Describe the 'general' guest log and the config needed to toggle it."""
        owner = cfg.get_configuration_property('database_service_uid')
        datastore_dir = self.app.get_data_dir()
        long_query_time = CONF.get(self.manager).get(
            'guest_log_long_query_time')
        general_log_file = self.build_log_file_name(
            self.GUEST_LOG_DEFS_GENERAL_LABEL, owner,
            datastore_dir=datastore_dir)
        general_log_dir, general_log_filename = os.path.split(general_log_file)
        return {
            self.GUEST_LOG_DEFS_GENERAL_LABEL: {
                self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
                self.GUEST_LOG_USER_LABEL: owner,
                self.GUEST_LOG_FILE_LABEL: general_log_file,
                self.GUEST_LOG_ENABLE_LABEL: {
                    'logging_collector': True,
                    'log_destination': 'stderr',
                    'log_directory': general_log_dir,
                    'log_filename': general_log_filename,
                    'log_statement': 'all',
                    'debug_print_plan': True,
                    'log_min_duration_statement': long_query_time,
                },
                self.GUEST_LOG_DISABLE_LABEL: {
                    'logging_collector': False,
                },
                self.GUEST_LOG_RESTART_LABEL: True,
            },
        }

    def is_log_enabled(self, logname):
        # NOTE(review): `logname` is ignored -- only one guest log is
        # defined above, so logging_collector alone decides; confirm if
        # more logs are ever added.
        return self.configuration_manager.get_value('logging_collector', False)

    def create_backup(self, context, backup_info):
        """Create backup for the database.

        :param context: User context object.
        :param backup_info: a dictionary containing the db instance id of the
                            backup task, location, type, and other data.
        """
        LOG.info(f"Creating backup {backup_info['id']}")
        with EndNotification(context):
            volumes_mapping = {
                '/var/lib/postgresql/data': {
                    'bind': '/var/lib/postgresql/data', 'mode': 'rw'
                },
                "/var/run/postgresql": {"bind": "/var/run/postgresql",
                                        "mode": "ro"},
            }
            extra_params = f"--pg-wal-archive-dir {service.WAL_ARCHIVE_DIR}"

            self.app.create_backup(context, backup_info,
                                   volumes_mapping=volumes_mapping,
                                   need_dbuser=False,
                                   extra_params=extra_params)

    def attach_replica(self, context, replica_info, slave_config,
                       restart=False):
        """Set up the standby server."""
        self.replication.enable_as_slave(self.app, replica_info, None)

        # For the previous primary, don't start db service in order to run
        # pg_rewind command next.
        if restart:
            self.app.restart()

    def make_read_only(self, context, read_only):
        """There seems to be no way to flag this at the database level in
        PostgreSQL at the moment -- see discussion here:
        http://www.postgresql.org/message-id/flat/CA+TgmobWQJ-GCa_tWUc4=80A
        1RJ2_+Rq3w_MqaVguk_q018dqw@mail.gmail.com#CA+TgmobWQJ-GCa_tWUc4=80A1RJ
        2_+Rq3w_MqaVguk_q018dqw@mail.gmail.com
        """
        pass

    def get_latest_txn_id(self, context):
        """Return the current WAL LSN (last replayed LSN on a replica)."""
        if self.app.is_replica():
            lsn = self.app.get_last_wal_replay_lsn()
        else:
            lsn = self.app.get_current_wal_lsn()
        LOG.info("Last wal location found: %s", lsn)
        return lsn

    def wait_for_txn(self, context, txn):
        """Block (up to 60s) until this replica has replayed past `txn`.

        :raises exception.TroveError: when called on a non-replica, or on
            timeout waiting for the replay LSN to reach `txn`.
        """
        if not self.app.is_replica():
            raise exception.TroveError("Attempting to wait for a txn on a "
                                       "non-replica server")

        def _wait_for_txn():
            lsn = self.app.get_last_wal_replay_lsn()
            LOG.info("Last wal location found: %s", lsn)
            return lsn >= txn

        try:
            utils.poll_until(_wait_for_txn, time_out=60)
        except exception.PollTimeOut:
            raise exception.TroveError(
                f"Timeout occurred waiting for wal offset to change to {txn}")
|
|
#!/usr/bin/python
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
    # When run as a script, make the SDK's bundled packages importable
    # from the source-tree layout (two directory levels up).
    here = sys.path[0]
    sys.path.insert(0, os.path.join(here, '..', '..','libs'))
    sys.path.insert(0, os.path.join(here, '..', '..','external_libs'))

#============================ verify installation =============================

from SmartMeshSDK.utils import SmsdkInstallVerifier

# Abort early with a readable message when the Python/pyserial
# installation cannot support this application.
(goodToGo,reason) = SmsdkInstallVerifier.verifyComponents(
    [
        SmsdkInstallVerifier.PYTHON,
        SmsdkInstallVerifier.PYSERIAL,
    ]
)
if not goodToGo:
    print "Your installation does not allow this application to run:\n"
    print reason
    raw_input("Press any button to exit")
    sys.exit(1)
#============================ imports =========================================
import threading
import copy
import time
import traceback
from SmartMeshSDK.utils import AppUtils, \
FormatUtils, \
LatencyCalculator
from SmartMeshSDK.ApiDefinition import IpMgrDefinition, \
HartMgrDefinition
from SmartMeshSDK.IpMgrConnectorMux import IpMgrSubscribe, \
IpMgrConnectorMux
from SmartMeshSDK.ApiException import APIError
from SmartMeshSDK.protocols.oap import OAPDispatcher, \
OAPClient, \
OAPMessage, \
OAPNotif
from dustUI import dustWindow, \
dustFrameApi, \
dustFrameConnection, \
dustFrameMoteList, \
dustFrameText, \
dustStyle
#============================ logging =========================================
# local
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record.

    (Equivalent to logging.NullHandler -- presumably kept local for
    compatibility with pre-2.7 interpreters.)
    """
    def emit(self, record):
        pass
log = logging.getLogger('App')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())  # swallow 'App' records unless a real handler is added
# global
AppUtils.configureLogging()
#============================ defines =========================================

# How often the GUI polls notifClient for fresh data.
GUI_UPDATEPERIOD = 250 # in ms

# columns names
COL_LED = 'toggle led'
# notification counters, keyed by the subscriber's notification names
COL_NOTIF_DATA = IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA
COL_NOTIF_IPDATA = IpMgrSubscribe.IpMgrSubscribe.NOTIFIPDATA
COL_NOTIF_HR = IpMgrSubscribe.IpMgrSubscribe.NOTIFHEALTHREPORT
# latency statistics
COL_LAT_MIN = 'lat. min'
COL_LAT_CUR = 'lat. current'
COL_LAT_MAX = 'lat. max'
COL_NOTIF_CLR = 'clear counters'
# temperature sampling
COL_TEMPERATURE = 'temperature'
COL_TEMP_NUM = 'num. temp'
COL_TEMP_CLR = 'clear temp'
COL_TEMP_RATE = 'publish rate (ms)'
#============================ body ============================================
##
# \addtogroup TempMonitor
# \{
#
class HartMgrSubscriber(threading.Thread):
    """Background thread that forwards HART manager notifications.

    Data notifications are passed to dataCb, network events to eventCb;
    when the notification stream ends (or errors out), disconnectedCb is
    invoked once before the thread exits.
    """

    def __init__(self,connector,dataCb,eventCb,disconnectedCb):
        # log
        log.debug("Initialize HartMgrSubscriber")

        # record variables
        self.connector = connector
        self.dataCb = dataCb
        self.eventCb = eventCb
        self.disconnectedCb = disconnectedCb

        # initialize parent class
        threading.Thread.__init__(self)

        # give this thread a name
        self.name = "HartMgrSubscriber"

        # subscribe to data
        self.connector.dn_subscribe('data event-network')

    def run(self):
        """Blocking notification loop; exits on error or empty notification."""
        log.debug("HartMgrSubscriber starts running")
        keepListening = True
        while keepListening:
            try:
                # block (-1 = no timeout) until the next notification arrives
                input = self.connector.getNotificationInternal(-1)
            # NOTE(review): ConnectionError/QueueError are not imported in
            # this module; they presumably come from SmartMeshSDK.ApiException
            # -- confirm, otherwise this clause raises NameError (on Python 2
            # neither name is a builtin) when a notification error occurs.
            except (ConnectionError,QueueError) as err:
                keepListening = False
            else:
                if input:
                    log.debug("HartMgrSubscriber received {0}".format(input))
                    if input[0][0] in ['data']:
                        self.dataCb('.'.join(input[0]),input[1])
                    elif input[0][0] in ['event']:
                        self.eventCb(input[0][1],input[1])
                    else:
                        raise SystemError("No callback for {0}".format(input[0]))
                else:
                    keepListening = False
        self.disconnectedCb()
class notifClient(object):
    """Collects notifications from an IP or HART manager into per-mote data.

    Shared state (self.data, self.updates, self.isMoteActive) is guarded by
    self.dataLock; the GUI thread drains it periodically through getData().
    """

    def __init__(self, apiDef, connector, disconnectedCallback, latencyCalculator):

        # store params
        self.apiDef = apiDef
        self.connector = connector
        self.disconnectedCallback = disconnectedCallback
        self.latencyCalculator = latencyCalculator

        # log
        log.debug("Initialize notifClient")

        # variables
        self.dataLock = threading.Lock()
        self.isMoteActive = {}
        self.data = {}
        self.updates = {}

        # subscriber
        if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
            # we are connected to an IP manager
            self.subscriber = IpMgrSubscribe.IpMgrSubscribe(self.connector)
            self.subscriber.start()
            # data notifications are delivered best-effort (isRlbl=False);
            # events and error/finish notifications reliably (isRlbl=True)
            self.subscriber.subscribe(
                notifTypes = [
                    IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA,
                ],
                fun = self._dataCallback,
                isRlbl = False,
            )
            self.subscriber.subscribe(
                notifTypes = [
                    IpMgrSubscribe.IpMgrSubscribe.NOTIFEVENT,
                ],
                fun = self._eventCallback,
                isRlbl = True,
            )
            self.subscriber.subscribe(
                notifTypes = [
                    IpMgrSubscribe.IpMgrSubscribe.ERROR,
                    IpMgrSubscribe.IpMgrSubscribe.FINISH,
                ],
                fun = self.disconnectedCallback,
                isRlbl = True,
            )
        elif isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
            # we are connected to a HART manager
            self.subscriber = HartMgrSubscriber(self.connector,
                                                self._dataCallback,
                                                self._eventCallback,
                                                self.disconnectedCallback)
            self.subscriber.start()
        else:
            output = "apiDef of type {0} unexpected".format(type(self.apiDef))
            log.critical(output)
            print output
            raise SystemError(output)

        # OAP dispatcher
        self.oap_dispatch = OAPDispatcher.OAPDispatcher()
        self.oap_dispatch.register_notif_handler(self._handle_oap_notif)

    #======================== public ==========================================

    def getData(self):
        """Snapshot and return (isMoteActive, data, updates); clears updates."""
        self.dataLock.acquire()
        returnIsMoteActive = copy.deepcopy(self.isMoteActive)
        returnData = copy.deepcopy(self.data)
        returnUpdates = copy.deepcopy(self.updates)
        self.updates = {}
        self.dataLock.release()
        return (returnIsMoteActive,returnData,returnUpdates)

    def getOapDispatcher(self):
        return self.oap_dispatch

    def clearNotifCounters(self,mac):
        """Reset notification counters and latency stats for one mote."""
        self.dataLock.acquire()
        self.updates = {}
        if mac in self.data:
            self.updates[mac] = []
            for k,v in self.data[mac].items():
                if k in [COL_NOTIF_DATA,
                         COL_NOTIF_IPDATA,
                         COL_NOTIF_HR]:
                    self.updates[mac].append(k)
                    self.data[mac][k] = 0
                elif k in [COL_LAT_MIN,
                           COL_LAT_CUR,
                           COL_LAT_MAX,]:
                    self.updates[mac].append(k)
                    self.data[mac][k] = '-'
        self.dataLock.release()

    def clearTemp(self,mac):
        """Reset the temperature reading and sample count for one mote."""
        self.dataLock.acquire()
        self.updates = {}
        if mac in self.data:
            self.updates[mac] = []
            for k,v in self.data[mac].items():
                if k in [COL_TEMP_NUM,]:
                    self.updates[mac].append(k)
                    self.data[mac][k] = 0
                if k in [COL_TEMPERATURE,]:
                    self.updates[mac].append(k)
                    self.data[mac][k] = '-'
        self.dataLock.release()

    def disconnect(self):
        self.connector.disconnect()

    #======================== private =========================================

    def _dataCallback(self, notifName, notifParams):
        """Count a data notification and update the mote's latency stats."""

        # log
        if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
            # IpMgrSubscribe generates a named tuple
            log.debug(
                "notifClient._dataCallback {0}:\n{1}".format(
                    notifName,
                    FormatUtils.formatNamedTuple(notifParams)
                )
            )
        elif isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
            # HartMgrSubscriber generates a dictionary
            log.debug(
                "notifClient._dataCallback {0}:\n{1}".format(
                    notifName,
                    FormatUtils.formatDictionnary(notifParams)
                )
            )
        else:
            output = "apiDef of type {0} unexpected".format(type(self.apiDef))
            log.critical(output)
            print output
            raise SystemError(output)

        # record current time
        timeNow = time.time()

        # read MAC address from notification
        mac = self._getMacFromNotifParams(notifParams)

        # lock the data structure
        self.dataLock.acquire()

        # add mac/type to data, if necessary
        if mac not in self.data:
            self.data[mac] = {}
        if notifName not in self.data[mac]:
            self.data[mac][notifName] = 0

        # add mac/type to updates, if necessary
        if mac not in self.updates:
            self.updates[mac] = []
        if notifName not in self.updates[mac]:
            self.updates[mac].append(notifName)

        # increment counter
        self.data[mac][notifName] += 1

        # transform HART OAP notification into equivalent IP version
        if isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
            # we are connected to a HART manager
            # drop the first two payload bytes and repackage the rest as an
            # IP-style notifData tuple so the OAP dispatcher can parse it
            if (notifName in ['data']) and (len(notifParams['payload'])>2):
                notifName = IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA
                notifParams = IpMgrConnectorMux.IpMgrConnectorMux.Tuple_notifData(
                    utcSecs = int(notifParams['time']/1000),
                    utcUsecs = (notifParams['time']%1000)*1000,
                    macAddress = mac,
                    srcPort = OAPMessage.OAP_PORT,
                    dstPort = OAPMessage.OAP_PORT,
                    data = tuple(notifParams['payload'][2:]),
                )

        # calculate latency
        try:
            if notifName in [IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA,
                             IpMgrSubscribe.IpMgrSubscribe.NOTIFIPDATA,]:
                try:
                    latency = self.latencyCalculator.getLatency(
                        float(notifParams.utcSecs)+(float(notifParams.utcUsecs)/1000000.0),
                        timeNow)

                    # lat. current
                    if COL_LAT_CUR not in self.data[mac]:
                        self.data[mac][COL_LAT_CUR] = '-'
                    if COL_LAT_CUR not in self.updates[mac]:
                        self.updates[mac].append(COL_LAT_CUR)
                    self.data[mac][COL_LAT_CUR] = latency

                    # lat. min
                    # update when smaller than the stored minimum, or when no
                    # numeric minimum is stored yet ('-' is the placeholder)
                    if (
                        (
                            (COL_LAT_MIN in self.data[mac])
                            and
                            (latency<self.data[mac][COL_LAT_MIN])
                        )
                        or
                        (
                            (COL_LAT_MIN not in self.data[mac])
                        )
                        or
                        (
                            (COL_LAT_MIN in self.data[mac])
                            and
                            (self.data[mac][COL_LAT_MIN]=='-')
                        )
                    ):
                        if COL_LAT_MIN not in self.data[mac]:
                            self.data[mac][COL_LAT_MIN] = '-'
                        if COL_LAT_MIN not in self.updates[mac]:
                            self.updates[mac].append(COL_LAT_MIN)
                        self.data[mac][COL_LAT_MIN] = latency

                    # max
                    if (
                        (
                            (COL_LAT_MAX in self.data[mac])
                            and
                            (latency>self.data[mac][COL_LAT_MAX])
                        )
                        or
                        (
                            (COL_LAT_MAX not in self.data[mac])
                        )
                        or
                        (
                            (COL_LAT_MAX in self.data[mac])
                            and
                            (self.data[mac][COL_LAT_MAX]=='-')
                        )
                    ):
                        if COL_LAT_MAX not in self.data[mac]:
                            self.data[mac][COL_LAT_MAX] = '-'
                        if COL_LAT_MAX not in self.updates[mac]:
                            self.updates[mac].append(COL_LAT_MAX)
                        self.data[mac][COL_LAT_MAX] = latency

                except RuntimeError:
                    # can happen if latency calculator hasn't acquired lock yet
                    pass
        except Exception as err:
            print err

        # unlock the data structure
        self.dataLock.release()

        # parse OAP packet
        if notifName in [IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA]:
            self.oap_dispatch.dispatch_pkt(notifName, notifParams)

    def _eventCallback(self, notifName, notifParams):
        """Track mote join/leave events to maintain self.isMoteActive."""
        try:
            # log
            log.debug("notifClient._eventCallback {0} {1}".format(notifName, notifParams))

            # lock the data structure
            self.dataLock.acquire()

            if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
                if notifName in [IpMgrSubscribe.IpMgrSubscribe.EVENTMOTEOPERATIONAL]:
                    mac = self._getMacFromNotifParams(notifParams)
                    self.isMoteActive[mac] = True
                if notifName in [IpMgrSubscribe.IpMgrSubscribe.EVENTMOTELOST]:
                    mac = self._getMacFromNotifParams(notifParams)
                    self.isMoteActive[mac] = False
            elif isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
                if notifName in ['MoteLive']:
                    mac = self._getMacFromNotifParams(notifParams)
                    self.isMoteActive[mac] = True
                if notifName in ['MoteUnknown','MoteDisconnect','MoteJoin']:
                    mac = self._getMacFromNotifParams(notifParams)
                    self.isMoteActive[mac] = False
            else:
                output = "apiDef of type {0} unexpected".format(type(self.apiDef))
                log.critical(output)
                print output
                raise SystemError(output)
        except Exception as err:
            output = traceback.format_exc()
            print output
            log.critical(output)
        finally:
            # unlock the data structure
            self.dataLock.release()

    def _getMacFromNotifParams(self,notifParams):
        """Return the mote MAC as a hashable tuple of ints, for either manager type."""
        if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
            # we are connected to an IP manager
            return tuple(notifParams.macAddress)
        elif isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
            # we are connected to a HART manager
            return tuple([int(i,16) for i in notifParams['macAddr'].split('-')])
        else:
            output = "apiDef of type {0} unexpected".format(type(self.apiDef))
            log.critical(output)
            print output
            raise SystemError(output)

    def _handle_oap_notif(self,mac,notif):
        """Record an OAP temperature sample; other notification types are ignored."""

        # convert MAC to tuple
        mac = tuple(mac)

        if isinstance(notif,OAPNotif.OAPTempSample):
            # this is a temperature notification

            # lock the data structure
            self.dataLock.acquire()

            # add mac/type to data, if necessary
            if mac not in self.data:
                self.data[mac] = {}
            if COL_TEMPERATURE not in self.data[mac]:
                self.data[mac][COL_TEMPERATURE] = None
            if COL_TEMP_NUM not in self.data[mac]:
                self.data[mac][COL_TEMP_NUM] = 0

            # add mac/type to updates, if necessary
            if mac not in self.updates:
                self.updates[mac] = []
            if COL_TEMPERATURE not in self.updates[mac]:
                self.updates[mac].append(COL_TEMPERATURE)
            if COL_TEMP_NUM not in self.updates[mac]:
                self.updates[mac].append(COL_TEMP_NUM)

            self.data[mac][COL_TEMPERATURE] = notif.samples[0]
            self.data[mac][COL_TEMP_NUM] += 1

            # unlock the data structure
            self.dataLock.release()
class TempMonitorGui(object):
def __init__(self):
# local variables
self.guiLock = threading.Lock()
self.apiDef = IpMgrDefinition.IpMgrDefinition()
self.notifClientHandler = None
self.latencyCalculator = None
self.guiUpdaters = 0
self.oap_clients = {}
# create window
self.window = dustWindow.dustWindow('TempMonitor',
self._windowCb_close)
# add a API selection frame
self.apiFrame = dustFrameApi.dustFrameApi(
self.window,
self.guiLock,
self._apiFrameCb_apiLoaded,
row=0,column=0,
deviceType=dustFrameApi.dustFrameApi.MANAGER)
self.apiFrame.show()
# add a connection frame
self.connectionFrame = dustFrameConnection.dustFrameConnection(
self.window,
self.guiLock,
self._connectionFrameCb_connected,
frameName="manager connection",
row=1,column=0)
# add a mote list frame
columnnames = [
# led
{
'name': COL_LED,
'type': dustFrameMoteList.dustFrameMoteList.ACTION,
},
# counters and latency
{
'name': COL_NOTIF_DATA,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_NOTIF_IPDATA,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_NOTIF_HR,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_LAT_MIN,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_LAT_CUR,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_LAT_MAX,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_NOTIF_CLR,
'type': dustFrameMoteList.dustFrameMoteList.ACTION,
},
# temperature
{
'name': COL_TEMPERATURE,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_TEMP_NUM,
'type': dustFrameMoteList.dustFrameMoteList.LABEL,
},
{
'name': COL_TEMP_CLR,
'type': dustFrameMoteList.dustFrameMoteList.ACTION,
},
{
'name': COL_TEMP_RATE,
'type': dustFrameMoteList.dustFrameMoteList.GETSETONEVAL,
},
]
self.moteListFrame = dustFrameMoteList.dustFrameMoteList(self.window,
self.guiLock,
columnnames,
row=2,column=0)
self.moteListFrame.show()
# add a status (text) frame
self.statusFrame = dustFrameText.dustFrameText(
self.window,
self.guiLock,
frameName="status",
row=3,column=0)
self.statusFrame.show()
#======================== public ==========================================
def start(self):
# log
log.debug("Starting TempMonitorGui")
# start Tkinter's main thead
try:
self.window.mainloop()
except SystemExit:
sys.exit()
#======================== private =========================================
#===== user interaction
def _apiFrameCb_apiLoaded(self,apiDefLoaded):
'''
\brief Called when an API is selected.
'''
# log
log.debug("_apiFrameCb_apiLoaded")
# record the loaded API
self.apiDef = apiDefLoaded
# tell other frames about it
self.connectionFrame.apiLoaded(self.apiDef)
# display frames
self.connectionFrame.show()
# update status
self.statusFrame.write("API {0} loaded successfully.".format(type(apiDefLoaded)))
def _connectionFrameCb_connected(self,connector):
'''
\brief Called when the connectionFrame has connected.
'''
# log
log.debug("_connectionFrameCb_connected")
# store the connector
self.connector = connector
# start a latency calculator
self.latencyCalculator = LatencyCalculator.LatencyCalculator(self.apiDef,self.connector)
self.latencyCalculator.start()
# start a notification client
self.notifClientHandler = notifClient(
self.apiDef,
self.connector,
self._connectionFrameCb_disconnected,
self.latencyCalculator,
)
# retrieve list of motes from manager
macs = self._getOperationalMotesMacAddresses()
for mac in macs:
self._addNewMote(mac)
# clear the colors on the GUI
self.moteListFrame.clearColors()
# schedule the GUI to update itself in GUI_UPDATEPERIOD ms
if self.guiUpdaters==0:
self.moteListFrame.after(GUI_UPDATEPERIOD,self._updateMoteList)
self.guiUpdaters += 1
# update status
self.statusFrame.write("Connection to manager successful.")
def _moteListFrameCb_toggleLed(self,mac,button):
if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
# find out whether to switch LED on of off
if button.cget("text")=='ON':
val = 1
button.configure(text="OFF")
else:
val = 0
button.configure(text="ON")
# send the OAP message
try:
self.oap_clients[mac].send( OAPMessage.CmdType.PUT, # command
[3,2], # address
data_tags=[OAPMessage.TLVByte(t=0,v=val)], # parameters
cb=None, # callback
)
except APIError as err:
self.statusFrame.write("[WARNING] {0}".format(err))
else:
# update status
self.statusFrame.write(
"Toggle LED command sent successfully to mote {0}.".format(
FormatUtils.formatMacString(mac),
)
)
else:
button.configure(text="N.A.")
# update status
self.statusFrame.write("This feature is only present in SmartMesh IP")
def _moteListFrameCb_clearCtrs(self,mac,button):
# clear the counters
self.notifClientHandler.clearNotifCounters(mac)
# update status
self.statusFrame.write(
"Counters for mote {0} cleared successfully.".format(
FormatUtils.formatMacString(mac),
)
)
def _moteListFrameCb_clearTemp(self,mac,button):
# clear the temperature data
self.notifClientHandler.clearTemp(mac)
# update status
self.statusFrame.write(
"Temperature data for mote {0} cleared successfully.".format(
FormatUtils.formatMacString(mac),
)
)
def _moteListFrameCb_rateGet(self,mac):
# send the OAP message
try:
self.oap_clients[mac].send( OAPMessage.CmdType.GET, # command
[5], # address
data_tags=None, # parameters
cb=self._oap_rateGet_resp, # callback
)
except APIError as err:
self.statusFrame.write("[WARNING] {0}".format(err))
else:
# update status
self.statusFrame.write(
"Publish rate get request sent successfully to mote {0}.".format(
FormatUtils.formatMacString(mac),
)
)
def _moteListFrameCb_rateSet(self,mac,val):
# send the OAP message
try:
self.oap_clients[mac].send( OAPMessage.CmdType.PUT, # command
[5], # address
data_tags=[OAPMessage.TLVByte(t=0,v=1),
OAPMessage.TLVLong(t=1,v=val),],# parameters
cb=None, # callback
)
except APIError as err:
self.statusFrame.write("[WARNING] {0}".format(err))
else:
# update status
self.statusFrame.write(
"Publish rate set({0}) request sent successfully to mote {1}.".format(
val,
FormatUtils.formatMacString(mac),
)
)
def _connectionFrameCb_disconnected(self,notifName=None,notifParams=None):
'''
\brief Called when the connectionFrame has disconnected.
'''
# kill the latency calculator thread
if self.latencyCalculator:
self.latencyCalculator.disconnect()
self.latencyCalculator = None
# update the GUI
self.connectionFrame.updateGuiDisconnected()
# delete the connector
self.connector = None
def _windowCb_close(self):
if self.latencyCalculator:
self.latencyCalculator.disconnect()
if self.notifClientHandler:
self.notifClientHandler.disconnect()
#===== helpers
def _getOperationalMotesMacAddresses(self):
returnVal = []
if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
# we are connected to an IP manager
currentMac = (0,0,0,0,0,0,0,0) # start getMoteConfig() iteration with the 0 MAC address
continueAsking = True
while continueAsking:
try:
res = self.connector.dn_getMoteConfig(currentMac,True)
except APIError:
continueAsking = False
else:
if ((not res.isAP) and (res.state in [4,])):
returnVal.append(tuple(res.macAddress))
currentMac = res.macAddress
elif isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
# we are connected to a HART manager
for m in self.connector.dn_getMotes():
if ((not m.isAccessPoint) and (m.state in ['Operational',])):
returnVal.append(tuple([int(i,16) for i in m.macAddr.split('-')]))
else:
output = "apiDef of type {0} unexpected".format(type(self.apiDef))
log.critical(output)
print output
raise SystemError(output)
# order by increasing MAC address
returnVal.sort()
return returnVal
    def _addNewMote(self,mac):
        # Register a newly discovered mote: add a row to the mote list frame
        # (first time only) and (re)create its OAP client.
        # add mote to GUI
        # Note: if you're reconnecting, mote already exists
        columnvals = {
            # led
            COL_LED: {
                'text': 'ON',
                'callback': self._moteListFrameCb_toggleLed,
            },
            # counters and latency
            COL_NOTIF_DATA: 0,
            COL_NOTIF_IPDATA: 0,
            COL_NOTIF_HR: 0,
            COL_LAT_MIN: '-',
            COL_LAT_CUR: '-',
            COL_LAT_MAX: '-',
            COL_NOTIF_CLR: {
                'text': 'clear',
                'callback': self._moteListFrameCb_clearCtrs,
            },
            # temperature
            COL_TEMPERATURE: '-',
            COL_TEMP_NUM: 0,
            COL_TEMP_CLR: {
                'text': 'clear',
                'callback': self._moteListFrameCb_clearTemp,
            },
            # publish-rate spinner; bounds are in milliseconds
            COL_TEMP_RATE: {
                'min': 1000,
                'max': 60000,
                'cb_get': self._moteListFrameCb_rateGet,
                'cb_set': self._moteListFrameCb_rateSet,
            },
        }
        # only add the GUI row the first time we see this mote; the presence
        # of an OAP client is used as the "already displayed" marker
        if mac not in self.oap_clients:
            self.moteListFrame.addMote(
                mac,
                columnvals,
            )
        # create OAPClient
        # Note: if you're reconnecting, this recreates the OAP client
        self.oap_clients[mac] = OAPClient.OAPClient(mac,
                                                    self._sendDataToConnector,
                                                    self.notifClientHandler.getOapDispatcher())
def _oap_rateGet_resp(self,mac,oap_resp):
temp = OAPMessage.Temperature()
temp.parse_response(oap_resp)
self.moteListFrame.update(mac,COL_TEMP_RATE, temp.rate.value)
def _updateMoteList(self):
updatable_columns = [
COL_NOTIF_DATA,
COL_NOTIF_IPDATA,
COL_NOTIF_HR,
COL_LAT_MIN,
COL_LAT_CUR,
COL_LAT_MAX,
COL_TEMPERATURE,
COL_TEMP_NUM,
]
# get the data
(isMoteActive,data,updates) = self.notifClientHandler.getData()
# update the frame
for mac,data in data.items():
# detect new motes
if mac not in self.oap_clients:
self._addNewMote(mac)
# update
for columnname,columnval in data.items():
if columnname in updatable_columns:
if ((mac in updates) and (columnname in updates[mac])):
self.moteListFrame.update(mac,columnname,columnval)
# enable/disable motes
for mac in isMoteActive:
if isMoteActive[mac]:
self.moteListFrame.enableMote(mac)
else:
self.moteListFrame.disableMote(mac)
# schedule the next update
self.moteListFrame.after(GUI_UPDATEPERIOD,self._updateMoteList)
def _sendDataToConnector(self,mac,priority,srcPort,dstPort,options,data):
if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
# we are connected to an IP manager
self.connector.dn_sendData(
mac,
priority,
srcPort,
dstPort,
options,
data
)
elif isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
# we are connected to a HART manager
self.connector.dn_sendRequest(
'-'.join(["%.2x"%b for b in mac]), # macAddr (string)
'maintenance', # domain (string)
'low', # priority (string)
True, # reliable (string)
[0x00,0x00,0xfc,0x12]+data, # data (hex)
)
else:
output = "apiDef of type {0} unexpected".format(type(self.apiDef))
log.critical(output)
print output
raise SystemError(output)
#============================ main ============================================
def main():
    """Entry point: create the TempMonitor GUI and start its event loop."""
    gui = TempMonitorGui()
    gui.start()

if __name__ == '__main__':
    main()
##
# end of TempMonitor
# \}
#
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict, List
import torch
import tests.utils as test_utils
from fairseq import utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
TransformEosDataset,
data_utils,
noising,
)
class TestDataNoising(unittest.TestCase):
def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with continuation markers as suffixes to denote
non-end of word tokens. This is the standard BPE format used in
fairseq's preprocessing.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he@@")
vocab.add_symbol("llo")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("y@@")
vocab.add_symbol("ou")
vocab.add_symbol("n@@")
vocab.add_symbol("ew")
vocab.add_symbol("or@@")
vocab.add_symbol("k")
src_tokens = [
["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
["how", "are", "y@@", "ou"],
]
x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_bpe_end_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with end-of-word markers as suffixes to denote
tokens at the end of a word. This is an alternative to fairseq's
standard preprocessing framework and is not generally supported
within fairseq.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he")
vocab.add_symbol("llo_EOW")
vocab.add_symbol("how_EOW")
vocab.add_symbol("are_EOW")
vocab.add_symbol("y")
vocab.add_symbol("ou_EOW")
vocab.add_symbol("n")
vocab.add_symbol("ew_EOW")
vocab.add_symbol("or")
vocab.add_symbol("k_EOW")
src_tokens = [
["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
["how_EOW", "are_EOW", "y", "ou_EOW"],
]
x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_word_vocab(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: word vocab
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("hello")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("you")
vocab.add_symbol("new")
vocab.add_symbol("york")
src_tokens = [
["hello", "new", "york", "you"],
["how", "are", "you", "new", "york"],
]
x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _convert_src_tokens_to_tensor(
self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
):
src_len = [len(x) for x in src_tokens]
# If we have to append EOS, we include EOS in counting src length
if append_eos:
src_len = [length + 1 for length in src_len]
x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
for i in range(len(src_tokens)):
for j in range(len(src_tokens[i])):
x[i][j] = vocab.index(src_tokens[i][j])
if append_eos:
x[i][j + 1] = vocab.eos()
x = x.transpose(1, 0)
return x, torch.LongTensor(src_len)
def assert_eos_at_end(self, x, x_len, eos):
"""Asserts last token of every sentence in x is EOS"""
for i in range(len(x_len)):
self.assertEqual(
x[x_len[i] - 1][i],
eos,
(
"Expected eos (token id {eos}) at the end of sentence {i} "
"but got {other} instead"
).format(i=i, eos=eos, other=x[i][-1]),
)
def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
# Expect only the first word (2 bpe tokens) of the first example
# was dropped out
self.assertEqual(x_len[0] - 2, l_noised[0])
for i in range(l_noised[0]):
self.assertEqual(x_noised[i][0], x[i + 2][0])
def test_word_dropout_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
# Expect only the first word (2 bpe tokens) of the first example
# was blanked out
self.assertEqual(x_len[0], l_noised[0])
for i in range(l_noised[0]):
if i < 2:
self.assertEqual(x_noised[i][0], unk)
else:
self.assertEqual(x_noised[i][0], x[i][0])
def test_word_blank_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def generate_unchanged_shuffle_map(self, length):
return {i: i for i in range(length)}
def assert_word_shuffle_matches_expected(
self,
x,
x_len,
max_shuffle_distance: int,
vocab: Dictionary,
expected_shufle_maps: List[Dict[int, int]],
expect_eos_at_end: bool,
bpe_end_marker=None,
):
"""
This verifies that with a given x, x_len, max_shuffle_distance, and
vocab, we get the expected shuffle result.
Args:
x: Tensor of shape (T x B) = (sequence_length, batch_size)
x_len: Tensor of length B = batch_size
max_shuffle_distance: arg to pass to noising
expected_shuffle_maps: List[mapping] where mapping is a
Dict[old_index, new_index], mapping x's elements from their
old positions in x to their new positions in x.
expect_eos_at_end: if True, check the output to make sure there is
an EOS at the end.
bpe_end_marker: str denoting the BPE end token. If this is not None, we
set the BPE cont token to None in the noising classes.
"""
bpe_cont_marker = None
if bpe_end_marker is None:
bpe_cont_marker = "@@"
with data_utils.numpy_seed(1234):
word_shuffle = noising.WordShuffle(
vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
)
x_noised, l_noised = word_shuffle.noising(
x, x_len, max_shuffle_distance=max_shuffle_distance
)
# For every example, we have a different expected shuffle map. We check
# that each example is shuffled as expected according to each
# corresponding shuffle map.
for i in range(len(expected_shufle_maps)):
shuffle_map = expected_shufle_maps[i]
for k, v in shuffle_map.items():
self.assertEqual(x[k][i], x_noised[v][i])
# Shuffling should not affect the length of each example
for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
self.assertEqual(pre_shuffle_length, post_shuffle_length)
if expect_eos_at_end:
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_shuffle_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=True,
)
def test_word_shuffle_with_eos_nonbpe(self):
"""The purpose of this is to test shuffling logic with word vocabs"""
vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
{0: 0, 1: 1, 2: 3, 3: 2},
{0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
],
expect_eos_at_end=True,
)
def test_word_shuffle_without_eos(self):
"""Same result as word shuffle with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
)
def test_word_shuffle_without_eos_with_bpe_end_marker(self):
"""Same result as word shuffle without eos except using BPE end token"""
vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
def assert_no_eos_at_end(self, x, x_len, eos):
"""Asserts that the last token of each sentence in x is not EOS"""
for i in range(len(x_len)):
self.assertNotEqual(
x[x_len[i] - 1][i],
eos,
"Expected no eos (token id {eos}) at the end of sentence {i}.".format(
eos=eos, i=i
),
)
def test_word_dropout_without_eos(self):
"""Same result as word dropout with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_blank_without_eos(self):
"""Same result as word blank with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def _get_noising_dataset_batch(
self,
src_tokens_no_pad,
src_dict,
append_eos_to_tgt=False,
):
"""
Constructs a NoisingDataset and the corresponding
``LanguagePairDataset(NoisingDataset(src), src)``. If
*append_eos_to_tgt* is True, wrap the source dataset in
:class:`TransformEosDataset` to append EOS to the clean source when
using it as the target.
"""
src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)
noising_dataset = noising.NoisingDataset(
src_dataset=src_dataset,
src_dict=src_dict,
seed=1234,
max_word_shuffle_distance=3,
word_dropout_prob=0.2,
word_blanking_prob=0.2,
noising_class=noising.UnsupervisedMTNoising,
)
tgt = src_dataset
language_pair_dataset = LanguagePairDataset(
src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
)
language_pair_dataset = TransformEosDataset(
language_pair_dataset,
src_dict.eos(),
append_eos_to_tgt=append_eos_to_tgt,
)
dataloader = torch.utils.data.DataLoader(
dataset=language_pair_dataset,
batch_size=2,
collate_fn=language_pair_dataset.collater,
)
denoising_batch_result = next(iter(dataloader))
return denoising_batch_result
def test_noising_dataset_with_eos(self):
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=True
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_noising_dataset_without_eos(self):
"""
Similar to test noising dataset with eos except that we have to set
*append_eos_to_tgt* to ``True``.
"""
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=False
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad,
src_dict=src_dict,
append_eos_to_tgt=True,
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
# Allow running this test module directly (e.g. `python test_noising.py`).
if __name__ == "__main__":
    unittest.main()
|
|
import logging
import spotipy
import spotipy.oauth2 as oauth2
import json
import datetime
from errbot import BotPlugin, botcmd
class BotifyPlugin(BotPlugin):
    """Errbot plugin managing per-channel collaborative Spotify playlists.

    Each IRC channel maps to a Spotify playlist of the same name owned by
    the configured user. Python-2-only syntax (`except E, e`, print
    statements) has been rewritten in forms that behave identically on
    Python 2 but are also valid Python 3.
    """

    def activate(self):
        """Load credentials and prepare the Spotify OAuth helper."""
        from config import BOTIFY_CREDS
        super(BotifyPlugin, self).activate()
        # dict of name: id for playlists, name is the IRC channel
        self.playlists = {}
        creds = json.load(open(BOTIFY_CREDS))
        client_id = creds.get('CLIENT_ID', 'YOUR_CLIENT_ID')
        client_secret = creds.get('CLIENT_SECRET', 'YOUR_CLIENT_SECRET')
        redirect_uri = creds.get('REDIRECT_URI', 'YOUR_REDIRECT_URI')
        self.username = creds.get('USERNAME', 'USERNAME')
        logging.info('Auth cache:' + creds.get('CACHE_PATH', self.username))
        self.sp_oauth = oauth2.SpotifyOAuth(
            client_id,
            client_secret,
            redirect_uri,
            scope='playlist-modify-public',
            cache_path=creds.get('CACHE_PATH', self.username)
        )

    @botcmd(split_args_with=None, admin_only=True)
    def botify_createlist(self, mess, args):
        """Create a playlist named after the first argument."""
        self.oath_refresh_if_needed()
        playlist = args[0]
        return '%s created? %s' % (playlist, self.create_playlist(playlist))

    @botcmd(split_args_with=None, admin_only=True)
    def botify_auth(self, mess, args):
        """
        Do the oauth challenge and response fandango
        """
        r = self.oath_refresh_if_needed()
        # BUGFIX: r is None when no refresh was needed; the original did
        # `'expired' not in r` which raised TypeError in that case
        if r and 'expired' not in r:
            expires = self.token_expires()
            return "%s. Expires @ %s" % (r, expires.strftime('%H:%M:%S'))
        try:
            if args:
                return self.oauth_validate(args[0])
            else:
                ed = "http://imgur.com/A8QOnaR.jpg"
                return "You have 30 seconds to comply %s\n %s" % (
                    ed,
                    self.oauth_challenge()
                )
        except spotipy.SpotifyException as e:
            logging.error(e)
            return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'

    @botcmd(split_args_with=None, admin_only=True)
    def botify_authcheck(self, mess, args):
        """Report when the cached OAuth token expires."""
        self.oath_refresh_if_needed()
        expires = self.token_expires()
        return "Expires @ %s" % expires.strftime('%H:%M:%S')

    @botcmd
    def botify_list(self, mess, args):
        """List the tracks in the room's playlist."""
        self.oath_refresh_if_needed()
        playlist = self.playlist_id(mess)
        msg = "Listen along: http://open.spotify.com/user/%s/playlist/%s"
        results = [msg % (self.username, playlist), "-----"]
        if playlist:
            playlist_tracks = self.list_tracks(playlist)
            if len(playlist_tracks) == 0:
                results.append("No tracks in playlist")
            else:
                for d in playlist_tracks:
                    logging.info(d)
                    s = '%s : %s (%s) - [%s]' % (
                        d['track']['name'],
                        d['track']['album']['name'],
                        ', '.join([a['name'] for a in d['track']['artists']]),
                        d['track']['id']
                    )
                    results.append(s)
        else:
            results = ["No playlist for the room"]
        for d in results:
            yield d.encode('ascii', 'ignore')

    @botcmd
    def botify_search(self, mess, args):
        """Search Spotify and yield one formatted line per track found."""
        results = []
        try:
            results = self.search(args)
        except spotipy.SpotifyException as e:
            logging.error(e)
            yield 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
        else:
            for d in results:
                s = '%s : %s (%s)- %s' % (
                    d['name'],
                    d['album']['name'],
                    ', '.join([a['name'] for a in d['artists']]),
                    d['id'])
                yield s.encode('ascii', 'ignore')

    @botcmd
    def botify_add(self, mess, args):
        """Add space-separated track ids to the room's playlist."""
        self.oath_refresh_if_needed()
        playlist = self.playlist_id(mess)
        # single guard; the original re-checked `playlist` redundantly
        if not playlist:
            return "No playlist for the room"
        try:
            return self.add_track(playlist, args.split(' '))
        except spotipy.SpotifyException as e:
            logging.error(e)
            return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'

    @botcmd
    def botify_delete(self, mess, args):
        """Remove space-separated track ids from the room's playlist."""
        self.oath_refresh_if_needed()
        playlist = self.playlist_id(mess)
        try:
            if playlist:
                return self.delete_track(playlist, args.split(' '))
            else:
                return "No playlist for the room"
        except spotipy.SpotifyException as e:
            logging.error(e)
            return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'

    def search(self, term, limit=10):
        """Search Spotify for *term* and return the list of track dicts.

        Raises spotipy.SpotifyException on API failure so callers (which all
        catch it) can report the error. The original returned an error
        *string* here, which the caller then iterated as if it were a list
        of tracks and crashed.
        """
        try:
            tracks = self.sp.search(q=term, limit=limit)
        except spotipy.SpotifyException as e:
            logging.error(e)
            raise
        return tracks['tracks']['items']

    def delete_track(self, playlist, track_ids):
        """Delete *track_ids* from *playlist*; return a status string."""
        logging.info("delete tracks: %s" % track_ids)
        track_ids = ["spotify:track:%s" % t for t in track_ids]
        try:
            self.sp.user_playlist_delete_tracks(
                self.username,
                playlist,
                track_ids
            )
        except spotipy.SpotifyException as e:
            logging.error(e)
            return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
        return "Track removed"

    def add_track(self, playlist, track_ids):
        """Add *track_ids* to *playlist*; return a status string."""
        logging.info("adding tracks: %s" % track_ids)
        track_ids = ["spotify:track:%s" % t for t in track_ids]
        try:
            self.sp.user_playlist_add_tracks(
                self.username,
                playlist,
                track_ids
            )
        except spotipy.SpotifyException as e:
            logging.error(e)
            return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
        return "Track added"

    def list_tracks(self, playlist):
        """Return the track items of *playlist*."""
        return self.sp.user_playlist(
            self.username,
            playlist,
            fields="tracks,next"
        )['tracks']['items']

    def check_playlist(self, playlist):
        """Refresh the playlist cache and report whether *playlist* exists."""
        playlists = self.sp.user_playlists(self.username)['items']
        self.playlists = dict([(p['name'], p['id']) for p in playlists])
        return playlist in self.playlists

    def create_playlist(self, playlist):
        """Create *playlist* if missing; return whether it exists afterwards.

        The original returned None both when the playlist already existed and
        when creation succeeded without an exception; now every path returns
        a boolean.
        """
        if self.check_playlist(playlist):
            # already present; nothing to create
            return True
        logging.info('creating playlist: %s' % playlist)
        try:
            self.sp.user_playlist_create(
                self.username,
                playlist
            )
        except spotipy.SpotifyException as e:
            if e.http_status == 201:
                # there's a bug in spotipy that thinks a 201 is bad...
                return self.check_playlist(playlist)
            else:
                return False
        return self.check_playlist(playlist)

    def playlist_id(self, mess):
        """Return the playlist id for the message's channel, or False."""
        playlist = str(mess.getFrom())
        if self.check_playlist(playlist):
            return self.playlists[playlist]
        else:
            return False

    def oauth_challenge(self):
        """Return the URL the user must visit to authorize the app."""
        return self.sp_oauth.get_authorize_url()

    def oauth_validate(self, response):
        """Exchange the pasted redirect *response* for an access token."""
        try:
            logging.info("botify validating oauth response: %s" % response)
            code = self.sp_oauth.parse_response_code(response)
            logging.info("botify oauth code: %s" % code)
            token = self.sp_oauth.get_access_token(code)
            if token:
                self.sp = spotipy.Spotify(auth=token['access_token'])
                expires = datetime.datetime.fromtimestamp(token['expires_at'])
                return "Authorised. Expires @ %s" % expires.strftime(
                    '%H:%M:%S'
                )
            else:
                return "http://i.imgur.com/s5guP5z.gif"
        except spotipy.SpotifyException as e:
            logging.error(e)
            return "http://i.imgur.com/s5guP5z.gif"

    def oath_refresh_if_needed(self):
        """Refresh the OAuth token when it is within 5 minutes of expiry.

        Returns a status string when something happened, None when the token
        is still comfortably valid. (The method name keeps the historical
        'oath' typo because callers use it.)
        """
        expires = self.token_expires()
        delta = expires - datetime.datetime.now()
        if delta != abs(delta):
            # delta is negative: the token has already expired
            return "Token expired, reauth"
        if delta.seconds < 300:
            token_info = self.sp_oauth.get_cached_token()
            # single-string print: identical output to `print a, b` on
            # Python 2, valid on Python 3
            print('{0} {1}'.format(token_info['expires_at'],
                                   token_info['refresh_token']))
            self.sp_oauth.refresh_access_token(
                token_info['refresh_token']
            )
            token_info = self.sp_oauth.get_cached_token()
            print('{0} {1}'.format(token_info['expires_at'],
                                   token_info['refresh_token']))
            return "Token refreshed"

    def token_expires(self):
        """Return the cached token's expiry as a datetime."""
        token = self.sp_oauth.get_cached_token()
        return datetime.datetime.fromtimestamp(token['expires_at'])
|
|
from datetime import datetime
from os import path
from django.core.urlresolvers import NoReverseMatch
from django.test.utils import override_settings
from nose.tools import eq_, ok_
import amo.tests
from mkt.comm.models import (CommAttachment, CommunicationNote,
CommunicationThread, CommunicationThreadCC,
CommunicationThreadToken, user_has_perm_app,
user_has_perm_note, user_has_perm_thread)
from mkt.comm.tests.test_views import CommTestMixin
from mkt.constants import comm as const
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
from mkt.users.models import UserProfile
TESTS_DIR = path.dirname(path.abspath(__file__))
ATTACHMENTS_DIR = path.join(TESTS_DIR, 'attachments')
class PermissionTestMixin(object):
    """Shared fixtures and assertions for comm permission tests.

    Subclasses must set ``self.type`` ('note' or 'thread') and ``self.obj``
    (the object whose read permissions are under test) in their setUp.
    """
    fixtures = fixture('user_999', 'webapp_337141')

    def setUp(self):
        self.addon = Webapp.objects.get()
        self.user = UserProfile.objects.get(username='regularuser')
        self.thread = CommunicationThread.objects.create(addon=self.addon)
        self.author = UserProfile.objects.create(email='lol', username='lol')
        self.note = CommunicationNote.objects.create(
            thread=self.thread, author=self.author, note_type=0, body='xyz')
        self.obj = None

    def _eq_obj_perm(self, val):
        # Dispatch to the permission checker matching the object under test.
        check = user_has_perm_note if self.type == 'note' else user_has_perm_thread
        eq_(check(self.obj, self.user), val)

    def test_no_perm(self):
        self._eq_obj_perm(False)

    def test_has_perm_public(self):
        self.obj.update(read_permission_public=True)
        self._eq_obj_perm(True)

    def test_has_perm_dev(self):
        self.addon.addonuser_set.create(user=self.user)
        self.obj.update(read_permission_developer=True)
        self._eq_obj_perm(True)

    def test_has_perm_rev(self):
        self.grant_permission(self.user, 'Apps:Review')
        self.obj.update(read_permission_reviewer=True)
        self._eq_obj_perm(True)

    def test_has_perm_senior_rev(self):
        self.grant_permission(self.user, 'Apps:ReviewEscalated')
        self.obj.update(read_permission_senior_reviewer=True)
        self._eq_obj_perm(True)

    def test_has_perm_moz_contact(self):
        self.addon.update(
            mozilla_contact=','.join([self.user.email, 'lol@lol.com']))
        self.obj.update(read_permission_mozilla_contact=True)
        self._eq_obj_perm(True)

    def test_has_perm_staff(self):
        self.grant_permission(self.user, 'Admin:*')
        self.obj.update(read_permission_staff=True)
        self._eq_obj_perm(True)
class TestCommunicationNote(PermissionTestMixin, amo.tests.TestCase):
    """Permission tests targeting a CommunicationNote."""

    def setUp(self):
        super(TestCommunicationNote, self).setUp()
        self.type = 'note'
        self.obj = self.note

    def test_has_perm_author(self):
        # The note's author can always read it.
        self.obj.update(author=self.user)
        self._eq_obj_perm(True)

    def test_has_perm_no_author(self):
        # An authorless note grants nothing.
        self.obj.update(author=None)
        self._eq_obj_perm(False)

    def test_manager(self):
        # Only one note exists, and the user cannot see it yet.
        eq_(CommunicationNote.objects.count(), 1)
        visible = CommunicationNote.objects.with_perms(self.user, self.thread)
        eq_(visible.count(), 0)

        # Once the user authored the note, it becomes visible.
        self.note.update(author=self.user)
        visible = CommunicationNote.objects.with_perms(self.user, self.thread)
        eq_(visible.count(), 1)
class TestCommunicationThread(PermissionTestMixin, amo.tests.TestCase):
    """Permission tests targeting a CommunicationThread."""

    def setUp(self):
        super(TestCommunicationThread, self).setUp()
        self.type = 'thread'
        self.obj = self.thread

    def test_has_perm_posted(self):
        # Having authored a note on the thread grants read access.
        self.note.update(author=self.user)
        self._eq_obj_perm(True)

    def test_has_perm_cc(self):
        # Being CC'd on the thread grants read access.
        CommunicationThreadCC.objects.create(user=self.user, thread=self.obj)
        self._eq_obj_perm(True)

    def test_has_perm_app_reviewer(self):
        # App permission flips once the reviewer role is granted.
        ok_(not user_has_perm_app(self.user, self.addon))
        self.grant_permission(self.user, 'Apps:Review')
        ok_(user_has_perm_app(self.user, self.addon))

    def test_has_perm_app_developer(self):
        # App permission flips once the user becomes a developer of the app.
        ok_(not user_has_perm_app(self.user, self.addon))
        self.addon.addonuser_set.create(user=self.user)
        ok_(user_has_perm_app(self.user, self.addon))
class TestThreadTokenModel(amo.tests.TestCase):
    """Tests for CommunicationThreadToken validity and uuid rotation."""
    fixtures = fixture('user_999', 'webapp_337141')

    def setUp(self):
        addon = Webapp.objects.get(pk=337141)
        self.thread = CommunicationThread(addon=addon)
        user = UserProfile.objects.all()[0]
        self.token = CommunicationThreadToken(thread=self.thread, user=user)
        self.token.modified = datetime.now()
        self.token.use_count = 0

    def test_live_thread_token_is_valid(self):
        """A freshly-modified token is valid."""
        assert self.token.is_valid()

    def test_expired_thread_token_is_valid(self):
        """A token older than the expiry window is invalid."""
        self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)
        assert not self.token.is_valid()

    def test_unused_token_is_valid(self):
        """An unused token is valid."""
        assert self.token.is_valid()

    def test_max_used_thread_token_is_valid(self):
        """A token at its maximum use count is invalid."""
        self.token.use_count = const.MAX_TOKEN_USE_COUNT
        assert not self.token.is_valid()

    def test_reset_uuid(self):
        """`reset_uuid()` generates a different uuid."""
        self.thread.save()
        self.token.thread = self.thread
        self.token.save()
        old_uuid = self.token.uuid
        assert old_uuid

        self.token.reset_uuid()
        assert self.token.uuid
        assert old_uuid != self.token.uuid
@override_settings(REVIEWER_ATTACHMENTS_PATH=ATTACHMENTS_DIR)
class TestCommAttachment(amo.tests.TestCase, CommTestMixin):
    """Tests for CommAttachment file-name, path, and display helpers."""
    fixtures = fixture('webapp_337141')
    XSS_STRING = 'MMM <script>alert(bacon);</script>'

    def setUp(self):
        self.user = amo.tests.user_factory(username='porkbelly')
        amo.set_user(self.user)
        self.profile = self.user
        self.addon = Webapp.objects.get()
        self.version = self.addon.latest_version
        self.thread = self._thread_factory()
        self.note = self._note_factory(self.thread)
        self.attachment1, self.attachment2 = self._attachments(self.note)

    def _attachments(self, note):
        """Create and return a (text, image) pair of CommAttachment rows."""
        text_attachment = CommAttachment.objects.create(
            note=note, filepath='bacon.txt', mimetype='text/plain')
        image_attachment = CommAttachment.objects.create(
            note=note, filepath='bacon.jpg', description=self.XSS_STRING,
            mimetype='image/jpeg')
        return text_attachment, image_attachment

    def test_filename(self):
        msg = 'CommAttachment().filename() returning incorrect filename.'
        eq_(self.attachment1.filename(), 'bacon.txt', msg)
        eq_(self.attachment2.filename(), 'bacon.jpg', msg)

    def test_full_path_dirname(self):
        msg = 'CommAttachment().full_path() returning incorrect path.'
        FAKE_PATH = '/tmp/attachments/'
        with self.settings(REVIEWER_ATTACHMENTS_PATH=FAKE_PATH):
            eq_(self.attachment1.full_path(), FAKE_PATH + 'bacon.txt', msg)
            eq_(self.attachment2.full_path(), FAKE_PATH + 'bacon.jpg', msg)

    def test_display_name(self):
        msg = ('CommAttachment().display_name() returning '
               'incorrect display name.')
        eq_(self.attachment1.display_name(), 'bacon.txt', msg)

    def test_display_name_xss(self):
        # The description must come back escaped/stripped of script tags.
        ok_('<script>' not in self.attachment2.display_name())

    def test_is_image(self):
        msg = 'CommAttachment().is_image() not correctly detecting images.'
        eq_(self.attachment1.is_image(), False, msg)
        eq_(self.attachment2.is_image(), True, msg)

    def test_get_absolute_url(self):
        # Neither attachment should raise NoReverseMatch.
        try:
            self.attachment1.get_absolute_url()
            self.attachment2.get_absolute_url()
        except NoReverseMatch:
            assert False, 'CommAttachment.get_absolute_url NoReverseMatch'
|
|
# file test_fedora/test_views.py
#
# Copyright 2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock, patch
import os
import unittest
from django.conf import settings
from django.http import Http404
from eulfedora.models import DigitalObject, Datastream, FileDatastream
from eulfedora.server import Repository, FEDORA_PASSWORD_SESSION_KEY
from eulfedora.views import raw_datastream, login_and_store_credentials_in_session, \
datastream_etag, datastream_lastmodified, raw_audit_trail
from eulfedora import cryptutil
# Pidspace for test objects; falls back to 'testme' when the Django
# settings do not define FEDORA_PIDSPACE.
TEST_PIDSPACE = getattr(settings, 'FEDORA_PIDSPACE', 'testme')
class SimpleDigitalObject(DigitalObject):
    """Minimal DigitalObject subclass used as a fixture for the view tests."""
    CONTENT_MODELS = ['info:fedora/%s:SimpleDjangoCModel' % TEST_PIDSPACE]
    # NOTE: distinguish from SimpleCModel in non-django fedora unit tests
    # and use configured pidspace for automatic clean-up

    # extend digital object with datastreams for testing
    # inline XML-ish text datastream
    text = Datastream("TEXT", "Text datastream", defaults={
        'mimetype': 'text/plain',
    })
    # managed (binary) datastream, used to exercise range requests
    image = FileDatastream('IMAGE', 'managed binary image datastream', defaults={
        'mimetype': 'image/png'
    })
class FedoraViewsTest(unittest.TestCase):
    """Integration tests for the generic eulfedora Django views.

    Each test runs against a real test object ingested into the configured
    Fedora repository in setUp() and purged again in tearDown().
    """

    def setUp(self):
        # load test object to test views with
        repo = Repository()
        self.obj = repo.get_object(type=SimpleDigitalObject)
        self.obj.dc.content.title = 'test object for generic views'
        self.obj.text.content = 'sample plain-text content'
        img_file = os.path.join(settings.FEDORA_FIXTURES_DIR, 'test.png')
        self.obj.image.content = open(img_file)
        # force datastream checksums so we can test response headers
        for ds in [self.obj.dc, self.obj.rels_ext, self.obj.text, self.obj.image]:
            ds.checksum_type = 'MD5'
        self.obj.save()

    def tearDown(self):
        # remove the ingested test object from the repository
        self.obj.api.purgeObject(self.obj.pid)

    def test_raw_datastream(self):
        rqst = Mock()
        rqst.method = 'GET'
        # return empty headers for ETag condition check
        rqst.META = {}
        # rqst.META.get.return_value = None

        # DC: xml datastream should get ETag + Content-MD5 from the checksum
        response = raw_datastream(rqst, self.obj.pid, 'DC')
        expected, got = 200, response.status_code
        content = response.content
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream view of DC' \
            % (expected, got))
        expected, got = 'text/xml', response['Content-Type']
        self.assertEqual(expected, got,
            'Expected %s but returned %s for mimetype on raw_datastream view of DC' \
            % (expected, got))
        self.assertEqual(self.obj.dc.checksum, response['ETag'],
            'datastream checksum should be set as ETag header in the response')
        self.assertEqual(self.obj.dc.checksum, response['Content-MD5'])
        self.assert_('<dc:title>%s</dc:title>' % self.obj.dc.content.title in content)

        # RELS-EXT
        response = raw_datastream(rqst, self.obj.pid, 'RELS-EXT')
        expected, got = 200, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream view of RELS-EXT' \
            % (expected, got))
        expected, got = 'application/rdf+xml', response['Content-Type']
        self.assertEqual(expected, got,
            'Expected %s but returned %s for mimetype on raw_datastream view of RELS-EXT' \
            % (expected, got))

        # TEXT (non-xml content)
        response = raw_datastream(rqst, self.obj.pid, 'TEXT')
        expected, got = 200, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream view of TEXT' \
            % (expected, got))
        expected, got = 'text/plain', response['Content-Type']
        self.assertEqual(expected, got,
            'Expected %s but returned %s for mimetype on raw_datastream view of TEXT' \
            % (expected, got))
        # non-xml datastreams should have content-md5 & content-length headers
        self.assertEqual(self.obj.text.checksum, response['Content-MD5'],
            'datastream checksum should be set as Content-MD5 header in the response')
        self.assertEqual(len(self.obj.text.content), int(response['Content-Length']))

        # IMAGE (binary content)
        response = raw_datastream(rqst, self.obj.pid, 'IMAGE')
        expected, got = 200, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream view of IMAGE' \
            % (expected, got))
        expected, got = 'image/png', response['Content-Type']
        self.assertEqual(expected, got,
            'Expected %s but returned %s for mimetype on raw_datastream view of IMAGE' \
            % (expected, got))
        # non-xml datastreams should have content-md5 & content-length headers
        self.assertEqual(self.obj.image.checksum, response['Content-MD5'],
            'datastream checksum should be set as Content-MD5 header in the response')
        self.assertTrue(response.has_header('Content-Length'),
            'content-length header should be set in the response for binary datastreams')

        # non-existent datastream should 404
        self.assertRaises(Http404, raw_datastream, rqst, self.obj.pid, 'BOGUS-DSID')
        # non-existent record should 404
        self.assertRaises(Http404, raw_datastream, rqst, 'bogus-pid:1', 'DC')
        # check type handling?

        # set extra headers in the response
        extra_headers = {'Content-Disposition': 'attachment; filename=foo.txt'}
        response = raw_datastream(rqst, self.obj.pid, 'TEXT',
                                  headers=extra_headers)
        self.assertTrue(response.has_header('Content-Disposition'))
        self.assertEqual(response['Content-Disposition'], extra_headers['Content-Disposition'])

        # explicitly support GET and HEAD requests only
        rqst.method = 'POST'
        response = raw_datastream(rqst, self.obj.pid, 'DC')
        expected, got = 405, response.status_code
        self.assertEqual(expected, got,
            'Expected %s (Method not Allowed) but returned %s for POST to raw_datastream view' \
            % (expected, got))

        # HEAD request is handled internally, for efficiency
        rqst.method = 'HEAD'
        response = raw_datastream(rqst, self.obj.pid, 'DC')
        expected, got = 200, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for HEAD request on raw_datastream view' \
            % (expected, got))
        # HEAD response carries headers only, no body
        self.assertEqual('', response.content)

    def test_raw_datastream_range(self):
        # test http range requests
        rqst = Mock()
        rqst.method = 'GET'
        rqst.META = {}
        # use IMAGE for testing since it is binary content

        # set range header in the request; bytes=0- : entire datastream
        rqst.META['HTTP_RANGE'] = 'bytes=0-'
        response = raw_datastream(rqst, self.obj.pid, 'IMAGE',
                                  accept_range_request=True)
        # 206 = Partial Content
        expected, got = 206, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream range request' \
            % (expected, got))
        content = response.content
        self.assertEqual(self.obj.image.size, len(content),
            'range request of bytes=0- should return entire content (expected %d, got %d)' \
            % (self.obj.image.size, len(content)))
        self.assertEqual(self.obj.image.size, int(response['Content-Length']),
            'content-length header should be size of entire content (expected %d, got %d)' \
            % (self.obj.image.size, int(response['Content-Length'])))
        expected = 'bytes 0-%d/%d' % (self.obj.image.size - 1, self.obj.image.size)
        self.assertEqual(expected, response['Content-Range'],
            'content range response header should indicate bytes returned (expected %s, got %s)' \
            % (expected, response['Content-Range']))
        del response

        # set range request for partial beginning content; bytes=0-150
        bytes_requested = 'bytes=0-150'
        rqst.META['HTTP_RANGE'] = bytes_requested
        response = raw_datastream(rqst, self.obj.pid, 'IMAGE',
                                  accept_range_request=True)
        expected, got = 206, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream range request' \
            % (expected, got))
        content_len = 150
        self.assertEqual(content_len, len(response.content),
            'range request of %s should return %d bytes, got %d' \
            % (bytes_requested, content_len, len(response.content)))
        self.assertEqual(content_len, int(response['Content-Length']),
            'content-length header should be set to partial size %d (got %d)' \
            % (content_len, int(response['Content-Length'])))
        expected = 'bytes 0-150/%d' % self.obj.image.size
        self.assertEqual(expected, response['Content-Range'],
            'content range response header should indicate bytes returned (expected %s, got %s)' \
            % (expected, response['Content-Range']))

        # set range request for partial middle content; bytes=10-150
        bytes_requested = 'bytes=10-150'
        rqst.META['HTTP_RANGE'] = bytes_requested
        response = raw_datastream(rqst, self.obj.pid, 'IMAGE',
                                  accept_range_request=True)
        expected, got = 206, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream range request' \
            % (expected, got))
        content_len = 150 - 10
        self.assertEqual(content_len, len(response.content),
            'range request of %s should return %d bytes, got %d' \
            % (bytes_requested, content_len, len(response.content)))
        self.assertEqual(content_len, int(response['Content-Length']),
            'content-length header should be set to partial size %d (got %d)' \
            % (content_len, int(response['Content-Length'])))
        expected = 'bytes 10-150/%d' % self.obj.image.size
        self.assertEqual(expected, response['Content-Range'],
            'content range response header should indicate bytes returned (expected %s, got %s)' \
            % (expected, response['Content-Range']))

        # set range request for partial end content; bytes=2000-3118
        bytes_requested = 'bytes=2000-3118'
        rqst.META['HTTP_RANGE'] = bytes_requested
        response = raw_datastream(rqst, self.obj.pid, 'IMAGE',
                                  accept_range_request=True)
        expected, got = 206, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream range request' \
            % (expected, got))
        content_len = 3118 - 2000
        self.assertEqual(content_len, len(response.content),
            'range request of %s should return %d bytes, got %d' \
            % (bytes_requested, content_len, len(response.content)))
        self.assertEqual(content_len, int(response['Content-Length']),
            'content-length header should be set to partial size %d (got %d)' \
            % (content_len, int(response['Content-Length'])))
        expected = 'bytes 2000-3118/%d' % self.obj.image.size
        self.assertEqual(expected, response['Content-Range'],
            'content range response header should indicate bytes returned (expected %s, got %s)' \
            % (expected, response['Content-Range']))

        # invalid or unsupported ranges should return 416, range not satisfiable
        bytes_requested = 'bytes=10-9'  # start > end
        rqst.META['HTTP_RANGE'] = bytes_requested
        response = raw_datastream(rqst, self.obj.pid, 'IMAGE',
                                  accept_range_request=True)
        expected, got = 416, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream invalid range request %s' \
            % (expected, got, bytes_requested))

        # complex ranges not yet supported
        bytes_requested = 'bytes=1-10,30-50'
        rqst.META['HTTP_RANGE'] = bytes_requested
        response = raw_datastream(rqst, self.obj.pid, 'IMAGE',
                                  accept_range_request=True)
        expected, got = 416, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_datastream invalid range request %s' \
            % (expected, got, bytes_requested))

    def test_datastream_etag(self):
        rqst = Mock()
        rqst.META = {}
        # DC: etag should be the datastream checksum
        etag = datastream_etag(rqst, self.obj.pid, 'DC')
        self.assertEqual(self.obj.dc.checksum, etag)

        # bogus dsid should not error
        etag = datastream_etag(rqst, self.obj.pid, 'bogus-datastream-id')
        self.assertEqual(None, etag)

        # range request 1 to end should return etag
        rqst.META = {'HTTP_RANGE': 'bytes=1-'}
        etag = datastream_etag(rqst, self.obj.pid, 'DC')
        self.assertEqual(self.obj.dc.checksum, etag)

        # any other range request should NOT return etag
        rqst.META = {'HTTP_RANGE': 'bytes=300-500'}
        etag = datastream_etag(rqst, self.obj.pid, 'DC', accept_range_request=True)
        self.assertEqual(None, etag)

    def test_datastream_lastmodified(self):
        rqst = Mock()
        rqst.META = {}
        # DC: last modified should be the datastream creation date
        lastmod = datastream_lastmodified(rqst, self.obj.pid, 'DC')
        self.assertEqual(self.obj.dc.created, lastmod)

        # bogus dsid should not error
        lastmod = datastream_lastmodified(rqst, self.obj.pid, 'bogus-datastream-id')
        self.assertEqual(None, lastmod)

        # range request should not affect last modification time
        rqst.META = {'HTTP_RANGE': 'bytes=1-'}
        lastmod = datastream_lastmodified(rqst, self.obj.pid, 'DC')
        self.assertEqual(self.obj.dc.created, lastmod)

        # any other range request should still return last modification time
        rqst.META = {'HTTP_RANGE': 'bytes=300-500'}
        lastmod = datastream_lastmodified(rqst, self.obj.pid, 'DC', accept_range_request=True)
        self.assertEqual(self.obj.dc.created, lastmod)

    def test_raw_audit_trail(self):
        rqst = Mock()
        rqst.method = 'GET'
        # created with no ingest message = no audit trail
        self.assertRaises(Http404, raw_audit_trail, rqst, self.obj.pid)

        # modify object so it will have an audit trail
        self.obj.dc.content.title = 'audit this!'
        changelog = 'I just changed the title'
        self.obj.save(changelog)
        response = raw_audit_trail(rqst, self.obj.pid)
        expected, got = 200, response.status_code
        self.assertEqual(expected, got,
            'Expected %s but returned %s for raw_audit_trail' \
            % (expected, got))
        expected, got = 'text/xml', response['Content-Type']
        self.assertEqual(expected, got,
            'Expected %s but returned %s for mimetype on raw_audit_trail' \
            % (expected, got))
        self.assert_('<audit:auditTrail' in response.content)
        self.assert_('<audit:justification>%s</audit:justification>' % changelog
                     in response.content)
        self.assert_('Last-Modified' in response)

    def test_login_and_store_credentials_in_session(self):
        # only testing custom logic, which happens on POST
        # everything else is handled by django.contrib.auth
        mockrequest = Mock()
        mockrequest.method = 'POST'

        # side-effect helpers that simulate the contrib.auth login view
        def not_logged_in(rqst):
            rqst.user.is_authenticated.return_value = False

        def set_logged_in(rqst):
            rqst.user.is_authenticated.return_value = True
            rqst.POST.get.return_value = "TEST_PASSWORD"

        # failed login
        with patch('eulfedora.views.authviews.login',
                   new=Mock(side_effect=not_logged_in)):
            mockrequest.session = dict()
            response = login_and_store_credentials_in_session(mockrequest)
            self.assert_(FEDORA_PASSWORD_SESSION_KEY not in mockrequest.session,
                'user password for fedora should not be stored in session on failed login')

        # successful login
        with patch('eulfedora.views.authviews.login',
                   new=Mock(side_effect=set_logged_in)):
            response = login_and_store_credentials_in_session(mockrequest)
            self.assert_(FEDORA_PASSWORD_SESSION_KEY in mockrequest.session,
                'user password for fedora should be stored in session on successful login')
            # test password stored in the mock request
            pwd = mockrequest.POST.get()
            # encrypted password stored in session
            sessionpwd = mockrequest.session[FEDORA_PASSWORD_SESSION_KEY]
            self.assertNotEqual(pwd, sessionpwd,
                'password should not be stored in the session without encryption')
            self.assertEqual(pwd, cryptutil.decrypt(sessionpwd),
                'user password stored in session is encrypted')
|
|
"""
This module contains the base Script class that all
scripts are inheriting from.
It also defines a few common scripts.
"""
from sys import getsizeof
from time import time
from collections import defaultdict
from twisted.internet.defer import maybeDeferred
from twisted.internet.task import LoopingCall
from django.conf import settings
from src.server import caches
from src.typeclasses.typeclass import TypeClass
from src.scripts.models import ScriptDB
from src.comms import channelhandler
from src.utils import logger, is_pypy
from django.utils.translation import ugettext as _
__all__ = ["Script", "DoNothing", "CheckSessions", "ValidateScripts", "ValidateChannelHandler"]
if not is_pypy:
    # ClearAttributeCache relies on getsizeof(), which pypy does not support.
    __all__.append("ClearAttributeCache")
# Session handler, imported lazily on first use (see CheckSessions.at_repeat).
_SESSIONS = None
_ATTRIBUTE_CACHE_MAXSIZE = settings.ATTRIBUTE_CACHE_MAXSIZE  # attr-cache size in MB.
#
# Base script, inherit from Script below instead.
#
class ScriptClass(TypeClass):
    """
    Base class for scripts. Don't inherit from this, inherit from Script instead.
    """
    # private methods

    def __eq__(self, other):
        """
        This has to be located at this level, having it in the
        parent doesn't work.

        Two scripts compare equal when their database ids match.
        """
        try:
            return other.dbid == self.dbid
        except Exception:
            # `other` has no dbid (not a script / not persisted)
            return False

    def _start_task(self, start_now=True):
        """Start the twisted LoopingCall that drives timed repeats."""
        self.ndb.twisted_task = LoopingCall(self._step_task)
        if self.ndb._paused_time:
            # we had paused the script, restarting: run for the remaining time
            #print " start with paused time:", self.key, self.ndb._paused_time
            self.ndb.twisted_task.start(self.ndb._paused_time, now=False)
        else:
            # starting script anew.
            #print "_start_task: self.interval:", self.key, self.dbobj.interval
            self.ndb.twisted_task.start(self.dbobj.interval, now=start_now and not self.start_delay)
        self.ndb.time_last_called = int(time())

    def _stop_task(self):
        """Stop the twisted task runner, if it is running."""
        try:
            #print "stopping twisted task:", id(self.ndb.twisted_task), self.obj
            if self.ndb.twisted_task and self.ndb.twisted_task.running:
                self.ndb.twisted_task.stop()
        except Exception:
            logger.log_trace()

    def _step_err_callback(self, e):
        """Errback for runner errors: report to the scripted object and log."""
        cname = self.__class__.__name__
        estring = _("Script %(key)s(#%(dbid)i) of type '%(cname)s': at_repeat() error '%(err)s'.") % \
                  {"key":self.key, "dbid":self.dbid, "cname":cname, "err":e.getErrorMessage()}
        try:
            # try to tell the object the script sits on; best-effort only
            self.dbobj.db_obj.msg(estring)
        except Exception:
            pass
        logger.log_errmsg(estring)

    def _step_succ_callback(self):
        """Step task runner. No try..except needed due to defer wrap.

        Runs at_repeat(), handles the repeat countdown, and switches
        back to a normal-interval timer after a pause has been consumed.
        """
        if not self.is_valid():
            self.stop()
            return
        self.at_repeat()
        repeats = self.dbobj.db_repeats
        if repeats <= 0:
            pass  # infinite repeat
        elif repeats == 1:
            # this was the last allowed repeat
            self.stop()
            return
        else:
            self.dbobj.db_repeats -= 1
        self.ndb.time_last_called = int(time())
        self.save()
        if self.ndb._paused_time:
            # this means we were running an unpaused script, for the time remaining
            # after the pause. Now we start a normal-running timer again.
            #print "switching to normal run:", self.key
            del self.ndb._paused_time
            self._stop_task()
            self._start_task(start_now=False)

    def _step_task(self):
        """Step the task: wrap the success callback in a Deferred so errors
        are routed to _step_err_callback."""
        try:
            d = maybeDeferred(self._step_succ_callback)
            d.addErrback(self._step_err_callback)
            return d
        except Exception:
            logger.log_trace()

    # Public methods

    def time_until_next_repeat(self):
        """
        Returns the time in seconds until the script will be
        run again. If this is not a stepping script, returns None.

        This is not used in any way by the script's stepping
        system; it's only here for the user to be able to
        check in on their scripts and when they will next be run.
        """
        try:
            if self.ndb._paused_time:
                # paused script: countdown uses the stored remaining time
                return max(0, (self.ndb.time_last_called + self.ndb._paused_time) - int(time()))
            else:
                return max(0, (self.ndb.time_last_called + self.dbobj.db_interval) - int(time()))
        except Exception:
            # missing timing info (e.g. never started)
            return None

    def start(self, force_restart=False):
        """
        Called every time the script is started (for
        persistent scripts, this is usually once every server start)

        force_restart - if True, will always restart the script, regardless
                        of if it has started before.

        returns 0 or 1 to indicated the script has been started or not. Used in counting.
        """
        #print "Script %s (%s) start (active:%s, force:%s) ..." % (self.key, id(self.dbobj),
        #                                                          self.is_active, force_restart)
        if self.dbobj.is_active and not force_restart:
            # script already runs and should not be restarted.
            return 0
        obj = self.obj
        if obj:
            # check so the scripted object is valid and initalized
            try:
                object.__getattribute__(obj, 'cmdset')
            except AttributeError:
                # this means the object is not initialized.
                self.dbobj.is_active = False
                return 0
        # try to restart a paused script
        if self.unpause():
            return 1
        # try to start the script from scratch
        try:
            self.dbobj.is_active = True
            self.at_start()
            if self.dbobj.db_interval > 0:
                self._start_task()
            return 1
        except Exception:
            logger.log_trace()
            self.dbobj.is_active = False
            return 0

    def stop(self, kill=False):
        """
        Called to stop the script from running.
        This also deletes the script.

        kill - don't call finishing hooks.

        Returns 1 on success, 0 if deletion failed.
        """
        #print "stopping script %s" % self.key
        #import pdb
        #pdb.set_trace()
        if not kill:
            try:
                self.at_stop()
            except Exception:
                logger.log_trace()
        if self.dbobj.db_interval > 0:
            try:
                self._stop_task()
            except Exception:
                logger.log_trace("Stopping script %s(%s)" % (self.key, self.dbid))
                pass
        try:
            self.dbobj.delete()
        except AssertionError:
            # already deleted or delete was vetoed
            logger.log_trace()
            return 0
        return 1

    def pause(self):
        """
        This stops a running script and stores its active state.

        The remaining time is stored persistently (self.db) so the pause
        survives a server shutdown for persistent scripts.
        """
        #print "pausing", self.key, self.time_until_next_repeat()
        dt = self.time_until_next_repeat()
        if dt == None:
            # not a stepping script; nothing to pause
            return
        self.db._paused_time = dt
        self._stop_task()

    def unpause(self):
        """
        Restart a paused script. This WILL call at_start().

        Returns True if the script was paused and is now resumed,
        False otherwise.
        """
        #print "unpausing", self.key, self.db._paused_time
        dt = self.db._paused_time
        if dt == None:
            # was not paused
            return False
        try:
            self.dbobj.is_active = True
            self.at_start()
            # move stored remaining-time into non-persistent storage so the
            # timer runs once for the remaining time, then at full interval
            self.ndb._paused_time = dt
            self._start_task(start_now=False)
            del self.db._paused_time
        except Exception:
            logger.log_trace()
            self.dbobj.is_active = False
            return False
        return True

    # hooks

    def at_script_creation(self):
        "placeholder"
        pass

    def is_valid(self):
        "placeholder"
        pass

    def at_start(self):
        "placeholder."
        pass

    def at_stop(self):
        "placeholder"
        pass

    def at_repeat(self):
        "placeholder"
        pass

    def at_init(self):
        "called when typeclass re-caches. Usually not used for scripts."
        pass
#
# Base Script - inherit from this
#
class Script(ScriptClass):
    """
    Base typeclass for all in-game Scripts. Inherit from this class and
    override the hook methods to implement timers, recurring events and
    persistent game states.
    """
    def __init__(self, dbobj):
        """
        Initialize the Script typeclass around its database object.

        Scripts describe events, timers and states in game; they can have
        a time component or track state changing under given conditions.

        Script API overview (available on initiated typeclass objects):

        Properties:
          key / name    - name of the script
          aliases       - list of alias strings (saved as AliasDB entries,
                          returned as strings)
          dbref / id    - unique #id-number (read-only)
          dbobj         - link to the database model (read-only);
                          dbobj.typeclass points back to this class
          typeclass     - link back to this class (read-only); use
                          self.swap_typeclass() to switch
          date_created  - time stamp of object creation
          permissions   - list of permission strings
          desc          - optional description, shown in listings
          obj           - optional object the script is connected to and
                          acts on (set automatically by obj.scripts.add())
          interval      - seconds between repeats; <=0 turns off the ticker
          start_delay   - if True, wait self.interval seconds before the
                          first repeat instead of repeating right away
          repeats       - number of repeats before stopping; <=0 means
                          infinite repeats
          persistent    - whether the script survives a server shutdown
          is_active     - whether the script is currently running

        Handlers:
          locks - lock handler; use locks.add() to add new lock strings
          db    - persistent attribute storage (self.db.myattr = val)
          ndb   - non-persistent attributes; no database entry is created

        Helper methods:
          start()   - start the script (usually automatic at creation
                      and obj.script.add() etc)
          stop()    - stop the script and delete it
          pause()   - put the script on hold until unpause(); for
                      persistent scripts the pause survives a shutdown
          unpause() - resume a paused script as if it was never paused
          time_until_next_repeat() - for timed scripts (interval > 0),
                      seconds until the next tick

        Hook methods (override in subclasses):
          at_script_creation() - called only once, when an object of this
                      class is first created
          is_valid() - checked to see if the script is valid to keep
                      running at the current time; returning False stops
                      and removes the running script. Useful for tracking
                      state changes (e.g. a combat-stat tracker is only
                      valid while combat is actually going on)
          at_start() - called every time the script starts, which for
                      persistent scripts is at least once per server
                      start. Unaffected by start_delay, which only delays
                      the first at_repeat() call
          at_repeat() - called every self.interval seconds; called
                      immediately upon launch unless start_delay is True,
                      which delays the first call by self.interval
                      seconds. Never called if self.interval <= 0
          at_stop() - called as the script stops and is about to be
                      removed from the game, e.g. because is_valid()
                      returned False
          at_server_reload() - called when the server reloads; save
                      temporary variables here if they should survive
          at_server_shutdown() - called at a full server shutdown
        """
        super(Script, self).__init__(dbobj)

    def at_script_creation(self):
        """
        Called once only, by the create function. Sets default values.
        """
        self.key = "<unnamed>"
        self.desc = ""
        self.persistent = False
        self.interval = 0  # <=0 means no timed ticker
        self.start_delay = False
        self.repeats = 0  # <=0 means infinite repeats

    def is_valid(self):
        """
        Checked to see if this script may run at this time. Should return
        a boolean, gathering whatever it needs from its related self.obj.
        """
        return True

    def at_start(self):
        """
        Called whenever the script starts - for persistent scripts at
        least once every server start - and also when resuming after a
        pause (such as after a server reload).
        """
        pass

    def at_repeat(self):
        """
        Called every self.interval seconds while the script repeats.
        """
        pass

    def at_stop(self):
        """
        Called when it is time for this script to stop (e.g. because
        is_valid() returned False).
        """
        pass

    def at_server_reload(self):
        """
        Called whenever the server shuts down for a restart/reboot.
        Save non-persistent properties here to carry them across.
        """
        pass

    def at_server_shutdown(self):
        """
        Called whenever the server shuts down fully (i.e. not for a
        restart).
        """
        pass
# Some useful default Script types used by Evennia.
class DoNothing(Script):
    """Inert script type, used as the default fallback."""

    def at_script_creation(self):
        """Assign key and description only; the script never ticks."""
        self.desc = _("This is an empty placeholder script.")
        self.key = "sys_do_nothing"
class Store(Script):
    """Simple inert script acting as a generic data container."""

    def at_script_creation(self):
        """Assign key and description only; no timer is started."""
        self.desc = _("This is a generic storage container.")
        self.key = "sys_storage"
class CheckSessions(Script):
    """Periodically verifies that connected sessions are live."""

    def at_script_creation(self):
        """Configure a persistent once-a-minute timer."""
        self.key = "sys_session_check"
        self.desc = _("Checks sessions so they are live.")
        self.persistent = True
        self.interval = 60  # seconds between checks

    def at_repeat(self):
        """Run validation on all sessions via the global session handler."""
        global _SESSIONS
        if not _SESSIONS:
            # imported lazily to avoid a circular import at module load time
            from src.server.sessionhandler import SESSIONS as _SESSIONS
        _SESSIONS.validate_sessions()
class ValidateScripts(Script):
    """Runs a periodic validation pass over all scripts."""

    def at_script_creation(self):
        """Configure a persistent hourly timer."""
        self.key = "sys_scripts_validate"
        self.desc = _("Validates all scripts regularly.")
        self.persistent = True
        self.interval = 3600  # once per hour

    def at_repeat(self):
        """Ask the script manager to validate every stored script."""
        ScriptDB.objects.validate()
class ValidateChannelHandler(Script):
    """Keeps the channel handler in sync with the database."""

    def at_script_creation(self):
        """Configure a persistent timer, offset slightly from ValidateScripts."""
        self.key = "sys_channels_validate"
        self.desc = _("Updates the channel handler")
        self.persistent = True
        self.interval = 3700  # a little later than ValidateScripts

    def at_repeat(self):
        """Rebuild the channel handler's cached channel data."""
        channelhandler.CHANNELHANDLER.update()
class ClearAttributeCache(Script):
    """Periodically flushes the attribute cache when it grows too large."""

    def at_script_creation(self):
        """Configure a persistent timer firing every two hours."""
        self.key = "sys_cache_clear"
        self.desc = _("Clears the Attribute Cache")
        self.interval = 3600 * 2
        self.persistent = True

    def at_repeat(self):
        """Flush the attribute cache if it exceeds the configured max size (MB)."""
        if is_pypy:
            # pypy doesn't support getsizeof, so cache sizes cannot be measured.
            return
        # Take only the first element of the sizes tuple. The original code
        # unpacked the rest into `_`, which shadowed the module-level
        # ugettext alias `_` within this method.
        attr_cache_size = caches.get_cache_sizes()[0]
        if attr_cache_size > _ATTRIBUTE_CACHE_MAXSIZE:
            caches.flush_attr_cache()
|
|
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
# Add the library location to the path
import sys
sys.path.insert(0, 'lib')
import os
import httplib2
import sessions
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build
from apiclient.http import MediaUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
class DriveState(object):
    """Parsed representation of the 'state' parameter provided by Drive."""

    def __init__(self, state):
        """Create a new instance of drive state.

        Parse and load the JSON state parameter.

        Args:
          state: State query parameter as a string.
        """
        if not state:
            # No state given: default to creating a new file.
            self.action = 'create'
            self.ids = []
            return
        parsed = json.loads(state)
        self.action = parsed['action']
        self.ids = map(str, parsed.get('ids', []))

    @classmethod
    def FromRequest(cls, request):
        """Create a Drive State instance from an HTTP request.

        Args:
          cls: Type this class method is called against.
          request: HTTP request.
        """
        return cls(request.get('state'))
class BaseDriveHandler(webapp.RequestHandler):
  """Base request handler for drive applications.

  Adds Authorization support for Drive.
  """

  def CreateOAuthFlow(self):
    """Create OAuth2.0 flow controller.

    This controller can be used to perform all parts of the OAuth 2.0 dance
    including exchanging an Authorization code.

    Returns:
      OAuth2.0 Flow instance suitable for performing OAuth2.0.
    """
    # Scope is intentionally blank here; callers set it when needed
    # (see RedirectAuth, which assigns ALL_SCOPES before step 1).
    flow = flow_from_clientsecrets('client_secrets.json', scope='')
    # Dynamically set the redirect_uri based on the request URL. This is extremely
    # convenient for debugging to an alternative host without manually setting the
    # redirect URI.
    flow.redirect_uri = self.request.url.split('?', 1)[0].rsplit('/', 1)[0]
    return flow

  def GetCodeCredentials(self):
    """Create OAuth 2.0 credentials by extracting a code and performing OAuth2.0.

    The authorization code is extracted from the URI parameters. If it is absent,
    None is returned immediately. Otherwise, if it is present, it is used to
    perform step 2 of the OAuth 2.0 web server flow.

    Once a token is received, the user information is fetched from the userinfo
    service and stored in the session. The token is saved in the datastore against
    the user ID received from the userinfo service.

    Returns:
      OAuth2.0 credentials suitable for authorizing clients or None if
      Authorization could not take place.
    """
    # Other frameworks use different API to get a query parameter.
    code = self.request.get('code')
    if not code:
      # returns None to indicate that no code was passed from Google Drive.
      return None
    # Auth flow is a controller that is loaded with the client information,
    # including client_id, client_secret, redirect_uri etc
    oauth_flow = self.CreateOAuthFlow()
    # Perform the exchange of the code. If there is a failure with exchanging
    # the code, return None.
    try:
      creds = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
      return None
    # Create an API service that can use the userinfo API. Authorize it with our
    # credentials that we gained from the code exchange.
    # NOTE(review): CreateService is expected to be defined elsewhere in this
    # module — confirm it exists before reuse.
    users_service = CreateService('oauth2', 'v2', creds)
    # Make a call against the userinfo service to retrieve the user's information.
    # In this case we are interested in the user's "id" field.
    userid = users_service.userinfo().get().execute().get('id')
    # Store the user id in the user's cookie-based session.
    # SESSION_SECRET is a module-level constant defined elsewhere in this file.
    session = sessions.LilCookies(self, SESSION_SECRET)
    session.set_secure_cookie(name='userid', value=userid)
    # Store the credentials in the data store using the userid as the key.
    StorageByKeyName(Credentials, userid, 'credentials').put(creds)
    return creds

  def GetSessionCredentials(self):
    """Get OAuth 2.0 credentials for an HTTP session.

    If the user has a user id stored in their cookie session, extract that value
    and use it to load that user's credentials from the data store.

    Returns:
      OAuth2.0 credentials suitable for authorizing clients, or None.
    """
    # Try to load the user id from the session
    session = sessions.LilCookies(self, SESSION_SECRET)
    userid = session.get_secure_cookie(name='userid')
    if not userid:
      # return None to indicate that no credentials could be loaded from the
      # session.
      return None
    # Load the credentials from the data store, using the userid as a key.
    creds = StorageByKeyName(Credentials, userid, 'credentials').get()
    # if the credentials are invalid, return None to indicate that the credentials
    # cannot be used.
    if creds and creds.invalid:
      return None
    return creds

  def RedirectAuth(self):
    """Redirect a handler to an authorization page.

    Used when a handler fails to fetch credentials suitable for making Drive API
    requests. The request is redirected to an OAuth 2.0 authorization approval
    page and on approval, are returned to application.
    """
    flow = self.CreateOAuthFlow()
    # Manually add the required scopes. Since this redirect does not originate
    # from the Google Drive UI, which authomatically sets the scopes that are
    # listed in the API Console.
    flow.scope = ALL_SCOPES
    # Create the redirect URI by performing step 1 of the OAuth 2.0 web server
    # flow.
    uri = flow.step1_get_authorize_url(flow.redirect_uri)
    # Perform the redirect.
    self.redirect(uri)

  def RespondJSON(self, data):
    """Generate a JSON response and return it to the client.

    Args:
      data: The data that will be converted to JSON to return.
    """
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps(data))

  def CreateAuthorizedService(self, service, version):
    """Create an authorized service instance.

    The service can only ever retrieve the credentials from the session.

    Args:
      service: Service name (e.g 'drive', 'oauth2').
      version: Service version (e.g 'v1').
    Returns:
      Authorized service or redirect to authorization flow if no credentials.
    """
    # For the service, the session holds the credentials
    creds = self.GetSessionCredentials()
    if creds:
      # If the session contains credentials, use them to create a Drive service
      # instance.
      return CreateService(service, version, creds)
    else:
      # If no credentials could be loaded from the session, redirect the user to
      # the authorization page. Note: this branch implicitly returns None.
      self.RedirectAuth()

  def CreateDrive(self):
    """Create a drive client instance."""
    return self.CreateAuthorizedService('drive', 'v2')

  def CreateUserInfo(self):
    """Create a user info client instance."""
    return self.CreateAuthorizedService('oauth2', 'v2')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,invalid-name
import random
import tempfile
import shutil
import os.path
# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from ansible.module_utils.basic import * # noqa: F403
# Ansible module documentation block; parsed by ansible-doc at packaging time.
# The string content is consumed verbatim and must remain valid YAML.
DOCUMENTATION = '''
---
module: openshift_container_binary_sync
short_description: Copies OpenShift binaries out of the given image tag to host system.
'''
class BinarySyncError(Exception):
    """Raised when syncing OpenShift binaries out of a container fails."""

    def __init__(self, msg):
        # Keep the message available as an attribute for module.fail_json.
        self.msg = msg
        super(BinarySyncError, self).__init__(msg)
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
    """
    Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
    a container onto the host system.
    """

    def __init__(self, module, image, tag, backend):
        """
        Args:
            module: AnsibleModule used to run commands and report results.
            image: Image name, possibly carrying an 'oci:'/'http:'/'https:' prefix.
            tag: Image tag to sync from.
            backend: Either 'docker' or 'atomic'.
        """
        self.module = module
        self.changed = False
        self.output = []
        self.bin_dir = '/usr/local/bin'
        self._image = image
        self.tag = tag
        self.backend = backend
        self.temp_dir = None  # created lazily by the backend sync methods

    def sync(self):
        """Sync the binaries using the configured backend."""
        if self.backend == 'atomic':
            return self._sync_atomic()
        return self._sync_docker()

    def _sync_atomic(self):
        """Mount the image with `atomic mount` and copy the binaries out."""
        self.temp_dir = tempfile.mkdtemp()
        temp_dir_mount = tempfile.mkdtemp()
        try:
            image_spec = '%s:%s' % (self.image, self.tag)
            rc, stdout, stderr = self.module.run_command(['atomic', 'mount',
                                                          '--storage', "ostree",
                                                          image_spec, temp_dir_mount])
            if rc:
                raise BinarySyncError("Error mounting image. stdout=%s, stderr=%s" %
                                      (stdout, stderr))
            for i in ["openshift", "oc"]:
                src_file = os.path.join(temp_dir_mount, "usr/bin", i)
                shutil.copy(src_file, self.temp_dir)

            self._sync_binaries()
        finally:
            # Always unmount and clean up, even when the copy failed.
            self.module.run_command(['atomic', 'umount', temp_dir_mount])
            shutil.rmtree(temp_dir_mount)
            shutil.rmtree(self.temp_dir)

    def _sync_docker(self):
        """Create a throwaway container and `docker cp` the binaries out."""
        container_name = "openshift-cli-%s" % random.randint(1, 100000)
        rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',
                                                      container_name, '%s:%s' % (self.image, self.tag)])
        if rc:
            raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" %
                                  (stdout, stderr))
        self.output.append(stdout)
        try:
            self.temp_dir = tempfile.mkdtemp()
            self.output.append("Using temp dir: %s" % self.temp_dir)

            rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name,
                                                          self.temp_dir])
            if rc:
                raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
                                      (stdout, stderr))

            rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name,
                                                          self.temp_dir])
            if rc:
                raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
                                      (stdout, stderr))

            self._sync_binaries()
        finally:
            shutil.rmtree(self.temp_dir)
            self.module.run_command(['docker', 'rm', container_name])

    def _sync_binaries(self):
        """Install the copied binaries and (re)create the expected symlinks."""
        self._sync_binary('openshift')

        # In older versions, oc was a symlink to openshift:
        if os.path.islink(os.path.join(self.temp_dir, 'oc')):
            self._sync_symlink('oc', 'openshift')
        else:
            self._sync_binary('oc')

        # Ensure correct symlinks created:
        self._sync_symlink('kubectl', 'openshift')
        self._sync_symlink('oadm', 'openshift')

    def _sync_symlink(self, binary_name, link_to):
        """ Ensure the given binary name exists and links to the expected binary. """

        # The symlink we are creating:
        link_path = os.path.join(self.bin_dir, binary_name)

        # The expected file we should be linking to:
        link_dest = os.path.join(self.bin_dir, link_to)

        if not os.path.exists(link_path) or \
                not os.path.islink(link_path) or \
                os.path.realpath(link_path) != os.path.realpath(link_dest):
            if os.path.exists(link_path):
                os.remove(link_path)
            os.symlink(link_to, os.path.join(self.bin_dir, binary_name))
            self.output.append("Symlinked %s to %s." % (link_path, link_dest))
            self.changed = True

    def _sync_binary(self, binary_name):
        """Move the binary into bin_dir when its sha256 differs from the installed one."""
        src_path = os.path.join(self.temp_dir, binary_name)
        dest_path = os.path.join(self.bin_dir, binary_name)
        incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]
        if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:
            # See: https://github.com/openshift/openshift-ansible/issues/4965
            if os.path.islink(dest_path):
                os.unlink(dest_path)
                self.output.append('Removed old symlink {} before copying binary.'.format(dest_path))
            shutil.move(src_path, dest_path)
            self.output.append("Moved %s to %s." % (src_path, dest_path))
            self.changed = True

    @property
    def raw_image(self):
        """
        Returns the image as it was originally passed in to the instance.

        .. note::
            This image string will only work directly with the atomic command.

        :returns: The original image passed in.
        :rtype: str
        """
        return self._image

    @property
    def image(self):
        """
        Returns the image without atomic prefixes used to map to skopeo args.

        :returns: The image string without prefixes
        :rtype: str
        """
        image = self._image
        for prefix in ('oci:', 'http:', 'https:'):
            if image.startswith(prefix):
                # BUGFIX: strip only the leading prefix. The previous
                # str.replace() removed the token anywhere in the string,
                # mangling images such as 'oci:repo/oci:tag'.
                image = image[len(prefix):]
        return image
def main():
    """Ansible entry point: sync OpenShift client binaries out of an image."""
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            image=dict(required=True),
            tag=dict(required=True),
            backend=dict(required=True),
        ),
        supports_check_mode=True
    )

    params = module.params
    if params['backend'] not in ('docker', 'atomic'):
        module.fail_json(msg="unknown backend")

    syncer = BinarySyncer(module, params['image'], params['tag'],
                          params['backend'])

    try:
        syncer.sync()
    except BinarySyncError as ex:
        module.fail_json(msg=ex.msg)

    return module.exit_json(changed=syncer.changed,
                            output=syncer.output)


if __name__ == '__main__':
    main()
|
|
import fnmatch
import os
import re
import sys
# Dictionary keys shared by the symbol/file/folder dicts built by Collector.
# Per-symbol keys:
NAME = "name"
DISPLAY_NAME = "display_name"
SIZE = "size"
PATH = "path"
BASE_FILE = "base_file"
LINE = "line"
ASM = "asm"
STACK_SIZE = "stack_size"
STACK_QUALIFIERS = "stack_qualifiers"
ADDRESS = "address"
# Symbol/element type tag and its possible values:
TYPE = "type"
TYPE_FUNCTION = "function"
TYPE_VARIABLE = "variable"
TYPE_FILE = "file"
TYPE_FOLDER = "folder"
# Sibling links between functions adjacent in the address space:
PREV_FUNCTION = "prev_function"
NEXT_FUNCTION = "next_function"
# Per-file element keys:
FUNCTIONS = "functions"
VARIABLES = "variables"
SYMBOLS = "symbols"
FILE = "file"
FILES = "files"
# Per-folder element keys:
FOLDER = "folder"
ROOT = "root"
ANCESTORS = "ancestors"
SUB_FOLDERS = "sub_folders"
COLLAPSED_NAME = "collapsed_name"
COLLAPSED_SUB_FOLDERS = "collapsed_sub_folders"
# Call-graph keys:
CALLEES = "callees"
CALLERS = "callers"
DEEPEST_CALLEE_TREE = "deepest_callee_tree"
DEEPEST_CALLER_TREE = "deepest_caller_tree"
def warning(*objs):
    """Write a space-separated warning line to stderr."""
    sys.stderr.write(" ".join(["WARNING: "] + [str(o) for o in objs]) + "\n")
def left_strip_from_list(lines):
    """Remove the longest common leading-whitespace prefix from every line.

    The candidate prefix is the leading whitespace of the first line; it is
    shortened until every line starts with it, then stripped from all lines.
    """
    if not lines:
        return lines
    prefix = re.match(r"^\s*", lines[0]).group(0)
    while prefix and not all(line.startswith(prefix) for line in lines):
        prefix = prefix[:-1]
    return [line[len(prefix):] for line in lines]
class Collector:
    """Aggregates symbol, size, stack-usage and call-graph data for a binary.

    Symbols are parsed from ELF size listings, disassembly and ``.su`` stack
    usage files (see the ``parse_*`` methods), then enriched in place by the
    ``enhance*``/``derive_folders`` passes. All symbols are plain dicts keyed
    by the module-level constants (NAME, SIZE, PATH, ...).
    """

    def __init__(self, gcc_tools):
        # gcc_tools provides disassembly/size/unmangling helpers (external).
        self.gcc_tools = gcc_tools
        # int address -> symbol dict
        self.symbols = {}
        # path -> file/folder element dict
        self.file_elements = {}
        # lazily-built name indexes (see build_symbol_name_index)
        self.symbols_by_qualified_name = None
        self.symbols_by_name = None

    def reset(self):
        """Drop all collected symbols, file elements and name indexes."""
        self.symbols = {}
        self.file_elements = {}
        self.symbols_by_qualified_name = None
        self.symbols_by_name = None

    def qualified_symbol_name(self, symbol):
        """Return "path/name" when file info is known, else the bare name."""
        if BASE_FILE in symbol:
            return os.path.join(symbol[PATH], symbol[NAME])
        return symbol[NAME]

    def symbol(self, name, qualified=True):
        """Look up a symbol by (qualified) name; returns None when unknown."""
        self.build_symbol_name_index()
        index = self.symbols_by_qualified_name if qualified else self.symbols_by_name
        return index.get(name, None)

    def symbol_by_addr(self, addr):
        """Look up a symbol by its hex-string address; returns None when unknown."""
        int_addr = int(addr, 16)
        return self.symbols.get(int_addr, None)

    def add_symbol(self, name, address, size=None, file=None, line=None, assembly_lines=None, type=None, stack_size=None):
        """Create or update the symbol stored under the given hex address.

        Only the supplied (truthy) fields are written; existing data is kept.
        Returns the (possibly new) symbol dict.
        """
        int_address = int(address, 16)
        sym = self.symbols.get(int_address, {})

        if NAME in sym and sym[NAME] != name:
            # Conflicting names for the same address are silently ignored;
            # the first name wins.
            # warning("Name for symbol at %s inconsistent (was '%s', now '%s')" % (address, sym[NAME], name))
            pass
        else:
            sym[NAME] = name
        if size:
            sym[SIZE] = int(size)
        if file:
            sym[PATH] = file
            sym[BASE_FILE] = os.path.basename(file)
        if line:
            sym[LINE] = line
        if assembly_lines:
            assembly_lines = left_strip_from_list(assembly_lines)
            sym[ASM] = assembly_lines
            # Having disassembly implies the symbol is a function (may be
            # overridden by an explicit `type` below).
            sym[TYPE] = TYPE_FUNCTION
        if type:
            sym[TYPE] = type
        if stack_size:
            sym[STACK_SIZE] = stack_size

        sym[ADDRESS] = address

        self.symbols[int_address] = sym
        return sym

    # 00000550 00000034 T main	/Users/behrens/Documents/projects/pebble/puncover/puncover/build/../src/puncover.c:25
    parse_size_line_re = re.compile(r"^([\da-f]{8})\s+([\da-f]{8})\s+(.)\s+(\w+)(\s+([^:]+):(\d+))?")

    def parse_size_line(self, line):
        """Parse one `nm`-style size line; returns True when it matched."""
        # print(line)
        match = self.parse_size_line_re.match(line)
        if not match:
            return False

        addr = match.group(1)
        size = int(match.group(2), 16)
        type = match.group(3)
        name = match.group(4)
        if match.group(5):
            file = match.group(6)
            line = int(match.group(7))
        else:
            file = None
            line = None

        # nm type letter -> our symbol type (unknown letters yield None).
        types = {"T": TYPE_FUNCTION, "D": TYPE_VARIABLE, "B": TYPE_VARIABLE, "R": TYPE_VARIABLE}

        self.add_symbol(name, address=addr, size=size, file=file, line=line, type = types.get(type.upper(), None))

        return True

    # 00000098 <pbl_table_addr>:
    # 00000098 <pbl_table_addr.constprop.0>:
    parse_assembly_text_function_start_pattern = re.compile(r"^([\da-f]{8})\s+<(\.?\w+)(\..*)?>:")

    # /Users/behrens/Documents/projects/pebble/puncover/pebble/build/../src/puncover.c:8
    parse_assembly_text_c_reference_pattern = re.compile(r"^(/[^:]+)(:(\d+))?")

    def parse_assembly_text(self, assembly):
        """Split objdump disassembly into per-symbol chunks.

        Registers one symbol per `<name>:` header, attaching its assembly
        lines and the first source file/line reference seen. Returns the
        number of symbols found.
        """
        # print(assembly)
        name = None
        addr = None
        symbol_file = None
        symbol_line = None
        assembly_lines = []
        found_symbols = 0

        def flush_current_symbol():
            # Store the symbol accumulated so far (closure over the locals).
            if name and addr:
                self.add_symbol(name, addr, assembly_lines=assembly_lines, file=symbol_file, line=symbol_line)
                return 1
            return 0

        for line in assembly.split("\n"):
            match = self.parse_assembly_text_function_start_pattern.match(line)
            if match:
                # New symbol header: flush the previous one and restart.
                found_symbols += flush_current_symbol()
                addr = match.group(1)
                name = match.group(2)
                symbol_file = None
                symbol_line = None
                assembly_lines = []
            else:
                file_match = self.parse_assembly_text_c_reference_pattern.match(line)
                if not file_match and line.strip() != "":
                    assembly_lines.append(line)
                elif file_match and not symbol_file:
                    # Only the first source reference per symbol is kept.
                    symbol_file = file_match.group(1)
                    if file_match.group(3):
                        symbol_line = int(file_match.group(3))

        found_symbols += flush_current_symbol()
        return found_symbols

    # puncover.c:8:43:dynamic_stack2	16	dynamic
    # puncover.c:14:40:0	16	dynamic,bounded
    # puncover.c:8:43:dynamic_stack2	16	dynamic
    parse_stack_usage_line_pattern = re.compile(r"^(.*?\.[ch](pp)?):(\d+):(\d+):([^\t]+)\t+(\d+)\s+([a-z,]+)")

    def parse_stack_usage_line(self, line):
        """Parse one GCC `.su` stack-usage line; returns True when matched."""
        match = self.parse_stack_usage_line_pattern.match(line)
        if not match:
            return False

        base_file_name = match.group(1)
        line = int(match.group(3))
        symbol_name = match.group(5)
        stack_size = int(match.group(6))
        stack_qualifier = match.group(7)

        return self.add_stack_usage(base_file_name, line, symbol_name, stack_size, stack_qualifier)

    # TODO: handle operators, e.g. String::operator=(char const*)
    # TODO: handle templates, e.g. void LinkedList<T>::clear() [with T = Loggable]
    re_cpp_display_name = re.compile(r"^(\w[^\(\s]*\s)*(\w+::~?)?(\w+)(\([^\)]*\))?(\sconst)?$")

    def display_name_simplified(self, name):
        """Normalize a C++ display name so .su and c++filt spellings compare equal."""
        # .su files have elements such as "virtual size_t Print::write(const uint8_t*, size_t)"
        # c++filt gives us "Print::write(unsigned char const*, unsigned int)"
        m = self.re_cpp_display_name.match(name)
        if m:
            groups = list(m.groups(''))

            def replace_identifiers(m):
                # these values were derived from an ARM 32Bit target
                # it could be that they need further adjustments
                # yes, we are treating int as long works only for 32bit platforms
                # right now, our sample projects use both types unpredictably in the same binary (oh, dear)
                mapping = {
                    'const': '', # we ignore those as a feasible simplification
                    'size_t': 'unsigned long',
                    'uint8_t': 'unsigned char',
                    'int8_t': 'signed char',
                    'uint16_t': 'unsigned short',
                    'int16_t': 'short',
                    'uint32_t': 'unsigned long',
                    'int32_t': 'long',
                    'uint64_t': 'unsigned long long',
                    'int64_t': 'long long',
                    'byte': 'unsigned char',
                    'int': 'long',
                }
                return mapping.get(m.group(), m.group())

            # in case, we have parameters, simplify those
            groups[3] = re.sub(r'\w+', replace_identifiers, groups[3])

            # TODO: C allows you to write the same C types in many different notations
            # http://ieng9.ucsd.edu/~cs30x/Std.C/types.html#Basic%20Integer%20Types
            # applies to tNMEA2000::SetProductInformation or Print::printNumber

            # remove leading "virtual size_t" etc.
            # non-matching groups should be empty strings
            name = ''.join(groups[1:])

            # remove white space artifacts from previous replacements
            for k, v in [(' ', ' '), (' ', ' '), ('( ', '('), (' )', ')'), ('< ', '<'), (' >', '>'), (' *', '*'), (' &', '&')]:
                name = name.replace(k, v)

        return name

    def display_names_match(self, a, b):
        """True when two display names are equal, directly or after simplification."""
        if a is None or b is None:
            return False

        if a == b:
            return True

        simplified_a = self.display_name_simplified(a)
        simplified_b = self.display_name_simplified(b)

        return simplified_a == simplified_b

    def add_stack_usage(self, base_file_name, line, symbol_name, stack_size, stack_qualifier):
        """Attach stack usage to a known symbol matched by file + line/name."""
        basename_symbols = [s for s in self.symbols.values() if s.get(BASE_FILE, None) == base_file_name]
        for symbol in basename_symbols:
            if symbol.get(LINE, None) == line or self.display_names_match(symbol_name, symbol.get(DISPLAY_NAME, None)):
                symbol[STACK_SIZE] = stack_size
                symbol[STACK_QUALIFIERS] = stack_qualifier
                return True

        # warning("Couldn't find symbol for %s:%d:%s" % (base_file_name, line, symbol_name))
        return False

    def normalize_files_paths(self, base_dir):
        """Make every symbol's PATH relative to base_dir (or root)."""
        base_dir = os.path.abspath(base_dir) if base_dir else "/"

        for s in self.all_symbols():
            path = s.get(PATH, None)
            if path:
                if path.startswith(base_dir):
                    path = os.path.relpath(path, base_dir)
                elif path.startswith("/"):
                    path = path[1:]
                s[PATH] = path

    def unmangle_cpp_names(self):
        """Populate DISPLAY_NAME for every symbol via the external unmangler."""
        symbol_names = list(symbol[NAME] for symbol in self.all_symbols())

        unmangled_names = self.gcc_tools.get_unmangled_names(symbol_names)

        for s in self.all_symbols():
            s[DISPLAY_NAME] = unmangled_names[s[NAME]]

    def parse_elf(self, elf_file):
        """Parse disassembly and size listing of an ELF file into symbols."""
        print("parsing ELF at %s" % elf_file)
        self.parse_assembly_text("".join(self.gcc_tools.get_assembly_lines(elf_file)))
        for l in self.gcc_tools.get_size_lines(elf_file):
            self.parse_size_line(l)

        # Remember the ELF mtime (presumably used for cache invalidation
        # elsewhere — not visible in this file; confirm against callers).
        self.elf_mtime = os.path.getmtime(elf_file)

    def parse_su_dir(self, su_dir):
        """Parse every *.su stack-usage file found below su_dir (if given)."""
        def gen_find(filepat,top):
            # Recursively yield paths matching the glob pattern.
            for path, dirlist, filelist in os.walk(top):
                for name in fnmatch.filter(filelist,filepat):
                    yield os.path.join(path,name)

        def gen_open(filenames):
            for name in filenames:
                yield open(name)

        def gen_cat(sources):
            # Chain the lines of all open files into one stream.
            for s in sources:
                for item in s:
                    yield item

        def get_stack_usage_lines(su_dir):
            names = gen_find("*.su", su_dir)
            files = gen_open(names)
            lines = gen_cat(files)
            return lines

        if su_dir:
            print("parsing stack usages starting at %s" % su_dir)
            for l in get_stack_usage_lines(su_dir):
                self.parse_stack_usage_line(l)

    def sorted_by_size(self, symbols):
        """Return symbols sorted by size, largest first."""
        return sorted(symbols, key=lambda k: k.get("size", 0), reverse=True)

    def all_symbols(self):
        return self.sorted_by_size(self.symbols.values())

    def all_functions(self):
        return list([f for f in self.all_symbols() if f.get(TYPE, None) == TYPE_FUNCTION])

    def all_variables(self):
        return list([f for f in self.all_symbols() if f.get(TYPE, None) == TYPE_VARIABLE])

    def enhance_assembly(self):
        """Rewrite each symbol's assembly lines with resolved branch targets."""
        for key, symbol in self.symbols.items():
            if ASM in symbol:
                symbol[ASM] = list([self.enhanced_assembly_line(l) for l in symbol[ASM]])

    def add_function_call(self, caller, callee):
        """Record a caller/callee edge (both directions, deduplicated)."""
        if caller != callee:
            if not callee in caller[CALLEES]:
                caller[CALLEES].append(callee)
            if not caller in callee[CALLERS]:
                callee[CALLERS].append(caller)
            caller_file = caller.get("file", None)
            callee_file = callee.get("file", None)
            if callee_file and caller_file and callee_file != caller_file:
                callee["called_from_other_file"] = True

    #   934:	f7ff bba8 	b.w	88 <jump_to_pbl_function>
    #   8e4:	f000 f824 	bl	930 <app_log>
    #
    # but not:
    #  805bbac:	2471 0805 b64b 0804 b3c9 0804 b459 0804     q$..K.......Y...
    enhance_call_tree_pattern = re.compile(r"^\s*[\da-f]+:\s+[\d\sa-f]{9}\s+BL?(EQ|NE|CS|HS|CC|LO|MI|PL|VS|VC|HI|LS|GE|LT|GT|LE|AL)?(\.W|\.N)?\s+([\d\sa-f]+)", re.IGNORECASE)

    def enhance_call_tree_from_assembly_line(self, function, line):
        """Add a call edge when the assembly line is a branch to a known symbol."""
        if "<" not in line:
            # cheap rejection before running the branch regex
            return False
        match = self.enhance_call_tree_pattern.match(line)

        if match:
            callee = self.symbol_by_addr(match.group(3))
            if callee:
                self.add_function_call(function, callee)
                return True

        return False

    def enhance_call_tree(self):
        """Build the full caller/callee graph from each function's assembly."""
        for f in self.all_functions():
            for k in [CALLERS, CALLEES]:
                f[k] = f.get(k, [])

        for f in self.all_functions():
            if ASM in f:
                [self.enhance_call_tree_from_assembly_line(f, l) for l in f[ASM]]

    def enhance(self, src_root):
        """Run all enhancement passes in their required order."""
        self.normalize_files_paths(src_root)
        print("enhancing function sizes")
        self.enhance_function_size_from_assembly()
        print("deriving folders")
        self.derive_folders()
        print("enhancing file elements")
        self.enhance_file_elements()
        print("enhancing assembly")
        self.enhance_assembly()
        print("enhancing call tree")
        self.enhance_call_tree()
        print("enhancing siblings")
        self.enhance_sibling_symbols()
        self.enhance_symbol_flags()
        print("unmangling c++ symbols")
        self.unmangle_cpp_names()

    #   98: a8a8a8a8  bl 98
    enhanced_assembly_line_pattern = re.compile(r"^\s*[\da-f]+:\s+[\d\sa-f]{9}\s+bl\s+([\d\sa-f]+)\s*$")

    def enhanced_assembly_line(self, line):
        """Append "<symbol>" to a bl line whose target address is known."""
        match = self.enhanced_assembly_line_pattern.match(line)
        if match:
            symbol = self.symbol_by_addr(match.group(1))
            if symbol:
                return line+ " <%s>" % (symbol["name"])
        return line

    #  88a:	ebad 0d03 	sub.w	sp, sp, r3
    count_assembly_code_bytes_re = re.compile(r"^\s*[\da-f]+:\s+([\d\sa-f]{9})")

    def count_assembly_code_bytes(self, line):
        """Number of code bytes encoded by one disassembly line (0 if none)."""
        match = self.count_assembly_code_bytes_re.match(line)
        if match:
            # two hex digits per byte
            return len(match.group(1).replace(" ", "")) // 2
        return 0

    def enhance_function_size_from_assembly(self):
        """Recompute SIZE for symbols with assembly by summing encoded bytes."""
        for f in self.all_symbols():
            if ASM in f:
                f[SIZE] = sum([self.count_assembly_code_bytes(l) for l in f[ASM]])

    def enhance_sibling_symbols(self):
        """Link each function to the function directly after/before it in memory."""
        for f in self.all_functions():
            if SIZE in f:
                addr = int(f.get(ADDRESS), 16) + f.get(SIZE)
                next_symbol = self.symbol_by_addr(hex(addr))
                if next_symbol and next_symbol.get(TYPE, None) == TYPE_FUNCTION:
                    f[NEXT_FUNCTION] = next_symbol

        for f in self.all_functions():
            n = f.get(NEXT_FUNCTION, None)
            if n:
                n[PREV_FUNCTION] = f

    def derive_folders(self):
        """Normalize symbol paths and attach each symbol to a file element."""
        for s in self.all_symbols():
            p = s.get(PATH, "<unknown>/<unknown>")
            p = os.path.normpath(p)
            s[PATH] = p
            s[BASE_FILE] = os.path.basename(p)
            s[FILE] = self.file_for_path(p)
            s[FILE][SYMBOLS].append(s)

    def file_element_for_path(self, path, type, default_values):
        """Get or create the file/folder element for path.

        Returns None when the existing element has a different type than
        requested (a path cannot be both a file and a folder).
        """
        if not path:
            return None

        result = self.file_elements.get(path, None)
        if not result:
            parent_dir = os.path.dirname(path)
            parent_folder = self.folder_for_path(parent_dir) if parent_dir and parent_dir != "/" else None
            result = {
                TYPE: type,
                PATH: path,
                FOLDER: parent_folder,
                NAME: os.path.basename(path),
            }
            for k, v in default_values.items():
                result[k] = v
            self.file_elements[path] = result

        return result if result[TYPE] == type else None

    def file_for_path(self, path):
        return self.file_element_for_path(path, TYPE_FILE, {SYMBOLS:[]})

    def folder_for_path(self, path):
        return self.file_element_for_path(path, TYPE_FOLDER, {FILES:[], SUB_FOLDERS:[], COLLAPSED_SUB_FOLDERS:[]})

    def file_items_ancestors(self, item):
        """Yield the chain of parent folders from closest to root."""
        while item.get(FOLDER):
            item = item[FOLDER]
            yield item

    def enhance_file_elements(self):
        """Wire up parent/child links, ancestor chains and collapsed names."""
        for f in self.all_files():
            parent = f.get(FOLDER, None)
            if parent:
                parent[FILES].append(f)
            f[SYMBOLS] = sorted(f[SYMBOLS], key=lambda s: s[NAME])
            f[FUNCTIONS] = list([s for s in f[SYMBOLS] if s.get(TYPE, None) == TYPE_FUNCTION])
            f[VARIABLES] = list([s for s in f[SYMBOLS] if s.get(TYPE, None) == TYPE_VARIABLE])

        for f in self.all_folders():
            parent = f.get(FOLDER, None)
            if parent:
                parent[SUB_FOLDERS].append(f)
            ancestors = list(self.file_items_ancestors(f))
            f[ANCESTORS] = ancestors
            if len(ancestors) > 0:
                f[ROOT] = ancestors[-1]

            # "Collapse" chains of empty folders into a single display name,
            # stopping at the first ancestor that contains files.
            collapsed_name = f[NAME]
            for a in ancestors:
                if len(f[FILES]) > 0:
                    a[COLLAPSED_SUB_FOLDERS].append(f)
                if len(a[FILES]) > 0:
                    break
                collapsed_name = os.path.join(a[NAME], collapsed_name)
            f[COLLAPSED_NAME] = collapsed_name

        for f in self.all_folders():
            for k in [FILES, SUB_FOLDERS]:
                f[k] = sorted(f[k], key=lambda s: s[NAME])
            f[COLLAPSED_SUB_FOLDERS] = sorted(f[COLLAPSED_SUB_FOLDERS], key=lambda s: s[COLLAPSED_NAME])

    def all_files(self):
        return [f for f in self.file_elements.values() if f[TYPE] == TYPE_FILE]

    def all_folders(self):
        return [f for f in self.file_elements.values() if f[TYPE] == TYPE_FOLDER]

    def root_folders(self):
        return [f for f in self.all_folders() if not f[FOLDER]]

    def collapsed_root_folders(self):
        """Return the topmost folders that actually contain files."""
        result = []

        def non_empty_leafs(f):
            if len(f[FILES]) > 0:
                result.append(f)
            else:
                for s in f[SUB_FOLDERS]:
                    non_empty_leafs(s)

        for f in self.root_folders():
            non_empty_leafs(f)

        return result

    def enhance_symbol_flags(self):
        """Flag functions/files/folders that (transitively) call float helpers."""
        # Matches the ARM EABI soft-float helper symbols (e.g. __aeabi_fadd).
        is_float_function_pattern = re.compile(r"^__aeabi_(f.*|.*2f)|__addsf3$")
        def is_float_function_name(n):
            return is_float_function_pattern.match(n)

        float_functions = [f for f in self.all_functions() if is_float_function_name(f[NAME])]

        for f in self.all_functions():
            callees = f[CALLEES]
            f["calls_float_function"] = any([ff in callees for ff in float_functions])

        for file in self.all_files():
            file["calls_float_function"] = any([f["calls_float_function"] for f in file[FUNCTIONS]])

        def folder_calls_float_function(folder):
            # Recurse first so every sub-folder gets flagged as well.
            result = any([f["calls_float_function"] for f in folder[FILES]])
            for sub_folder in folder[SUB_FOLDERS]:
                if folder_calls_float_function(sub_folder):
                    result = True
            folder["calls_float_function"] = result
            return result

        for folder in self.root_folders():
            folder_calls_float_function(folder)

    def build_symbol_name_index(self):
        """Lazily build the name -> symbol lookup tables (idempotent)."""
        if not self.symbols_by_name or not self.symbols_by_qualified_name:
            self.symbols_by_name = {}
            self.symbols_by_qualified_name = {}

            for s in self.symbols.values():
                name = s[NAME]
                if name:
                    self.symbols_by_name[name] = s
                qualified_name = self.qualified_symbol_name(s)
                if qualified_name:
                    self.symbols_by_qualified_name[qualified_name] = s
|
|
import re
import logging
from subprocess import Popen, PIPE, CalledProcessError, check_output as _check_output
from dummy import config
from dummy.utils import io
# Public API of this module: the lcov parser and the baseline helper.
__all__ = ( "parse", "baseline" )
# Module-level logger, named after this module.
logger = logging.getLogger( __name__ )
# Wrap subprocess.check_output so lcov's stderr chatter is captured instead
# of leaking to our stdout. (Was a lambda assignment; PEP 8 prefers `def`.)
def check_output(cmd):
    """Run *cmd* and return its stdout, capturing stderr via a pipe."""
    return _check_output(cmd, stderr=PIPE)


class LcovError(Exception):
    """Raised when an lcov invocation fails."""
    pass
class Parser:
    """Line-oriented parser for lcov ``.info`` tracefiles.

    Outside a file section only ``TN:`` headers and ``SF:`` section starts
    are accepted; inside a section per-file counters accumulate until
    ``end_of_record`` closes it.
    """

    # one compiled pattern per lcov record type we understand
    rheader = re.compile(r'^TN:(?P<name>.*)$')
    rfooter = re.compile(r'^end_of_record$')
    rpath = re.compile(r'^SF:(?P<path>.*)$')
    rfunction_hits = re.compile(r'^FNDA:(?P<hits>\d+),(?P<name>.*)$')
    rfunctions_found = re.compile(r'^FNF:(?P<found>\d+)$')
    rfunctions_hit = re.compile(r'^FNH:(?P<hits>\d+)$')
    rlines_found = re.compile(r'^LF:(?P<found>\d+)$')
    rlines_hit = re.compile(r'^LH:(?P<hits>\d+)$')

    @staticmethod
    def parse(info):
        """Parse lcov info data into a dictionary.

        args:
            info: lcov info data to parse

        returns:
            {dict}: coverage results of the form:
                {
                    'files': {
                        '<filename>': {
                            'lines': <int>,
                            'lines_hit': <int>,
                            'functions': <int>,
                            'functions_hit': <int>,
                            'function_coverage': {
                                '<name>': <times_run>,
                                ...
                            }
                        }
                    },
                    'lines': <int>,
                    'lines_hit': <int>,
                    'functions': <int>,
                    'functions_hit': <int>
                }

        raises:
            TypeError: When the info file is not formatted correctly.
        """
        result = {
            'files': {}
        }
        fresult = None  # per-file dict; None while outside a file section
        fpath = None    # source path of the section currently being parsed

        # running totals over the whole tracefile
        lines = 0
        lines_hit = 0
        functions = 0
        functions_hit = 0

        for line in info.splitlines():
            # first make sure we are in a file section
            if fresult is None:
                m = Parser.rpath.match(line)
                if m is not None:
                    # a new file section starts here
                    fresult = {
                        'function_coverage': {}
                    }
                    fpath = m.group('path')
                elif Parser.rheader.match(line):
                    # test-name headers are accepted but carry no data
                    pass
                else:
                    raise TypeError("Invalid coverage file format.")
                continue

            # single function hits
            m = Parser.rfunction_hits.match(line)
            if m is not None:
                fresult['function_coverage'][m.group('name')] = int(m.group('hits'))
                continue

            # functions hit
            m = Parser.rfunctions_hit.match(line)
            if m is not None:
                hit = int(m.group('hits'))
                fresult['functions_hit'] = hit
                # also add to the total
                functions_hit += hit
                continue

            # functions found
            m = Parser.rfunctions_found.match(line)
            if m is not None:
                found = int(m.group('found'))
                fresult['functions'] = found
                # also add to the total
                functions += found
                continue

            # lines hit
            m = Parser.rlines_hit.match(line)
            if m is not None:
                hit = int(m.group('hits'))
                fresult['lines_hit'] = hit
                # total
                lines_hit += hit
                continue

            # lines found
            m = Parser.rlines_found.match(line)
            if m is not None:
                found = int(m.group('found'))
                fresult['lines'] = found
                # total
                lines += found
                continue

            # make sure we close the file section properly
            m = Parser.rfooter.match(line)
            if m is not None:
                assert fpath is not None, "lcov file section had no SF entry (no file path)"
                result['files'][fpath] = fresult
                fresult = None
                continue

            # unrecognized lines inside a section are deliberately skipped
            # (e.g. branch records that this parser does not consume)

        result['lines'] = lines
        result['lines_hit'] = lines_hit
        result['functions'] = functions
        result['functions_hit'] = functions_hit

        return result
# Module-level convenience alias for Parser.parse (exported via __all__).
def parse( info ): return Parser.parse( info )
def makeopts(destination=None, branch_coverage=False):
    """Build the lcov options shared by all commands in this module.

    args:
        destination: optional output path, passed via ``-o``.
        branch_coverage: when True, enable lcov branch coverage recording.

    returns:
        list of option strings for the lcov command line.
    """
    options = []
    if destination is not None:
        # direct lcov output to the given file
        options.extend(['-o', destination])
    if branch_coverage:
        # record branch coverage as well
        options.extend(['--rc', 'lcov_branch_coverage=1'])
    # NOTE: '--no-external' is deliberately not passed; it breaks setups where
    # gcno files are generated in a different directory than the object files.
    return options
def _extract(path, extract, branch_coverage=False):
    """Keep only the coverage data matching the given extract patterns."""
    opts = makeopts(path, branch_coverage)
    opts += ["--extract", path]
    opts.extend(extract)
    try:
        check_output(['lcov'] + opts)
    except CalledProcessError as err:
        logger.debug("Lcov reported: %s" % err.output)
        raise LcovError("Filtering the coverage data failed")
def _remove(path, remove, branch_coverage=False):
    """Drop the coverage data matching the given remove patterns."""
    opts = makeopts(path, branch_coverage)
    # set removes
    opts += ["--remove", path]
    opts.extend(remove)
    try:
        check_output(['lcov'] + opts)
    except CalledProcessError as err:
        logger.debug("Lcov reported: %s" % err.output)
        raise LcovError("Filtering the coverage data failed")
# NOTE: shadows the builtin `filter`; the name is kept for backward
# compatibility with existing callers.
def filter(path, extract=(), remove=()):
    """Filter the coverage file at *path* in place.

    args:
        path: lcov tracefile to filter.
        extract: patterns to keep (passed to lcov --extract).
        remove: patterns to drop (passed to lcov --remove).
    """
    # BUGFIX: default arguments were mutable lists; immutable tuples avoid
    # the shared-mutable-default pitfall while keeping len() semantics.
    if len(extract) > 0:
        _extract(path, extract)
    if len(remove) > 0:
        _remove(path, remove)
def baseline(destination, srcdirs, extract=(), remove=(), branch_coverage=False):
    """Create an lcov baseline (zero-execution) tracefile in *destination*.

    args:
        destination: output path for the baseline tracefile.
        srcdirs: directories holding the gcov data (one ``-d`` each).
        extract: patterns to keep after collection.
        remove: patterns to drop after collection.
        branch_coverage: when True, record branch coverage too.

    raises:
        LcovError: when any lcov invocation fails.
    """
    assert len(srcdirs) != 0, "Need atleast one srcdir to collect coverage from"
    # BUGFIX: extract/remove defaults were mutable lists; use tuples.
    opts = makeopts(destination, branch_coverage)

    # set src
    for s in srcdirs:
        opts += ['-d', s]

    # create the baseline
    try:
        # make sure the target dir exists
        io.create_dir(destination)

        # create the baseline ('-i' = initial/zero coverage)
        check_output(['lcov', '-c', '-i'] + opts)

        # apply file filtering
        filter(destination, extract=extract, remove=remove)
    except CalledProcessError as e:
        logger.debug("Lcov reported: %s" % e.output)
        raise LcovError("Setting the lcov baseline failed")
def collect( destination, srcdirs, baseline=None, extract=[], remove=[], branch_coverage=False ):
    """Collect coverage data (``lcov -c``) from *srcdirs* into *destination*,
    optionally merge it with a previously captured *baseline*, then apply
    the extract/remove filters.

    Raises LcovError when any lcov invocation fails.
    """
    assert len( srcdirs ) != 0, "Need atleast one srcdir to collect coverage from"
    opts = makeopts( destination, branch_coverage )
    # set src
    for s in srcdirs:
        opts += [ '-d', s ]
    try:
        # make sure the target dir exists
        io.create_dir( destination )
        # collect the coverage
        logger.debug([ 'lcov', '-c' ] + opts )
        check_output([ 'lcov', '-c' ] + opts )
        if baseline is not None:
            # combine the data with the baseline (output overwrites destination)
            check_output([ 'lcov', '-a', baseline, '-a', destination ] + makeopts( destination, branch_coverage ))
        # finally filter the collected data
        # NOTE(review): branch_coverage is not forwarded to filter() here --
        # confirm branch data survives filtering.
        filter( destination, extract=extract, remove=remove )
    except CalledProcessError as e:
        logger.debug( "Lcov reported: %s" % e.output )
        raise LcovError( "Collecting coverage using lcov failed" )
def combine( destination, paths, branch_coverage=False ):
    """Merge several lcov tracefiles into *destination*.

    Each entry in *paths* is added with ``-a``; lcov writes the merged
    coverage data to *destination*.

    Raises LcovError when the lcov invocation fails.
    """
    opts = makeopts( destination, branch_coverage )
    # map paths to options for lcov
    for path in paths:
        opts += [ '-a', path ]
    logger.debug([ 'lcov' ] + opts )
    # combine the data with the accumulated set
    try:
        # return value of check_output intentionally discarded (was bound to
        # an unused local before); the output only matters on failure
        check_output([ 'lcov' ] + opts )
    except CalledProcessError as e:
        logger.debug( "Lcov reported: %s" % e.output )
        raise LcovError( "Combining the coverage data with lcov failed" )
def zero( srcdirs ):
    """Reset (zero) the gcov counters below every given source directory."""
    options = makeopts()
    options += [ arg for directory in srcdirs for arg in ( '-d', directory ) ]
    try:
        check_output([ 'lcov', '-z' ] + options )
    except CalledProcessError as e:
        logger.debug( "Lcov reported: %s" % e.output )
        raise LcovError( "Could not zero the coverage counters" )
|
|
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import object
from cgi import escape
from io import BytesIO as IO
import functools
import gzip
import dateutil.parser as dateparser
import json
import os
from flask import after_this_request, request, Response
from flask.ext.login import current_user
from jinja2 import Template
import wtforms
from wtforms.compat import text_type
from airflow import configuration, models, settings, utils
AUTHENTICATE = configuration.getboolean('webserver', 'AUTHENTICATE')
class LoginMixin(object):
    """Admin-view mixin: accessible to everyone when authentication is
    disabled, otherwise only to logged-in users."""
    def is_accessible(self):
        if not AUTHENTICATE:
            return True
        return (not current_user.is_anonymous() and
                current_user.is_authenticated())
class SuperUserMixin(object):
    """Admin-view mixin: accessible to everyone when authentication is
    disabled, otherwise only to logged-in superusers."""
    def is_accessible(self):
        if not AUTHENTICATE:
            return True
        return not current_user.is_anonymous() and current_user.is_superuser()
class DataProfilingMixin(object):
    """Admin-view mixin: accessible to everyone when authentication is
    disabled, otherwise only to users with the data-profiling permission."""
    def is_accessible(self):
        if not AUTHENTICATE:
            return True
        return not current_user.is_anonymous() and current_user.data_profiling()
def limit_sql(sql, limit, conn_type):
    """Wrap a SELECT statement so at most *limit* rows are returned.

    The wrapping syntax depends on the dialect (*conn_type*): ``TOP`` for
    MSSQL, ``ROWNUM`` for Oracle, ``LIMIT`` otherwise. Non-SELECT
    statements are returned unchanged, modulo surrounding whitespace and a
    trailing semicolon.
    """
    sql = sql.strip()
    sql = sql.rstrip(';')
    if sql.lower().startswith("select"):
        # Explicit format arguments instead of the old .format(**locals()),
        # which silently exposed every local name to the template.
        if conn_type in ['mssql']:
            sql = """\
SELECT TOP {limit} * FROM (
{sql}
) qry
""".format(sql=sql, limit=limit)
        elif conn_type in ['oracle']:
            sql = """\
SELECT * FROM (
{sql}
) qry
WHERE ROWNUM <= {limit}
""".format(sql=sql, limit=limit)
        else:
            sql = """\
SELECT * FROM (
{sql}
) qry
LIMIT {limit}
""".format(sql=sql, limit=limit)
    return sql
def action_logging(f):
    '''
    Decorator to log user actions

    Writes one Log row (event name, user, dag/task ids, raw request args)
    per invocation of the wrapped view, then calls the view.
    '''
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        session = settings.Session()
        # fall back to 'anonymous' when auth is off or user has no username
        if current_user and hasattr(current_user, 'username'):
            user = current_user.username
        else:
            user = 'anonymous'
        log = models.Log(
            event=f.__name__,
            task_instance=None,
            owner=user,
            extra=str(request.args.items()),
            task_id=request.args.get('task_id'),
            dag_id=request.args.get('dag_id'))
        if 'execution_date' in request.args:
            # execution_date arrives as a string; store it parsed
            log.execution_date = dateparser.parse(
                request.args.get('execution_date'))
        session.add(log)
        session.commit()
        # NOTE(review): the session is not explicitly closed here -- appears
        # to rely on scoped-session cleanup elsewhere; confirm.
        return f(*args, **kwargs)
    return wrapper
def notify_owner(f):
    '''
    Decorator to notify owner of actions taken on their DAGs by others

    NOTE(review): the notification logic below is a bare triple-quoted
    string (the wrapper's docstring), i.e. it is disabled dead code -- the
    wrapper currently just forwards to the view. Decide whether to
    re-enable or delete it.
    '''
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        """
        if request.args.get('confirmed') == "true":
            dag_id = request.args.get('dag_id')
            task_id = request.args.get('task_id')
            dagbag = models.DagBag(
                os.path.expanduser(configuration.get('core', 'DAGS_FOLDER')))
            dag = dagbag.get_dag(dag_id)
            task = dag.get_task(task_id)
            if current_user and hasattr(current_user, 'username'):
                user = current_user.username
            else:
                user = 'anonymous'
            if task.owner != user:
                subject = (
                    'Actions taken on DAG {0} by {1}'.format(
                        dag_id, user))
                items = request.args.items()
                content = Template('''
                    action: <i>{{ f.__name__ }}</i><br>
                    <br>
                    <b>Parameters</b>:<br>
                    <table>
                    {% for k, v in items %}
                        {% if k != 'origin' %}
                            <tr>
                                <td>{{ k }}</td>
                                <td>{{ v }}</td>
                            </tr>
                        {% endif %}
                    {% endfor %}
                    </table>
                    ''').render(**locals())
                if task.email:
                    utils.send_email(task.email, subject, content)
        """
        return f(*args, **kwargs)
    return wrapper
def json_response(obj):
    """
    returns a json response from a json serializable python object
    """
    payload = json.dumps(obj, indent=4, cls=utils.AirflowJsonEncoder)
    return Response(response=payload,
                    status=200,
                    mimetype="application/json")
def gzipped(f):
    '''
    Decorator to make a view compressed

    Registers an after-request hook that gzips the response body when the
    client advertises gzip support and the response is a plain 2xx.
    '''
    @functools.wraps(f)
    def view_func(*args, **kwargs):
        @after_this_request
        def zipper(response):
            # only compress when the client accepts gzip
            accept_encoding = request.headers.get('Accept-Encoding', '')
            if 'gzip' not in accept_encoding.lower():
                return response
            response.direct_passthrough = False
            # skip non-2xx responses and already-encoded bodies
            if (response.status_code < 200 or
                response.status_code >= 300 or
                'Content-Encoding' in response.headers):
                return response
            # compress the payload into an in-memory buffer
            gzip_buffer = IO()
            gzip_file = gzip.GzipFile(mode='wb',
                                      fileobj=gzip_buffer)
            gzip_file.write(response.data)
            gzip_file.close()
            response.data = gzip_buffer.getvalue()
            # advertise the encoding and keep Content-Length in sync
            response.headers['Content-Encoding'] = 'gzip'
            response.headers['Vary'] = 'Accept-Encoding'
            response.headers['Content-Length'] = len(response.data)
            return response
        return f(*args, **kwargs)
    return view_func
def make_cache_key(*args, **kwargs):
    '''
    Used by cache to get a unique key per URL (path plus query arguments)
    '''
    path = request.path
    # NOTE(review): hash() is salted per interpreter (PYTHONHASHSEED), so
    # these keys are not stable across processes/restarts -- confirm the
    # cache backend is per-process before relying on this.
    args = str(hash(frozenset(request.args.items())))
    return (path + args).encode('ascii', 'ignore')
class AceEditorWidget(wtforms.widgets.TextArea):
    """
    Renders an ACE code editor.

    The visible editor lives in the div; the hidden textarea carries the
    field's value on submit (kept in sync by client-side JS elsewhere).
    """
    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        # field value is HTML-escaped before being embedded in the div
        html = '''
        <div id="{el_id}" style="height:100px;">{contents}</div>
        <textarea
            id="{el_id}_ace" name="{form_name}"
            style="display:none;visibility:hidden;">
        </textarea>
        '''.format(
            el_id=kwargs.get('id', field.id),
            contents=escape(text_type(field._value())),
            form_name=field.id,
        )
        return wtforms.widgets.core.HTMLString(html)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
from sqlalchemy.orm import exc
from quantum.common import exceptions as q_exc
from quantum.openstack.common import log as logging
from quantum.plugins.cisco.common import cisco_exceptions as c_exc
from quantum.plugins.cisco.common import config
from quantum.plugins.cisco.db import l2network_models
import quantum.plugins.cisco.db.api as db
LOG = logging.getLogger(__name__)
def initialize():
    """Establish database connection and load models"""
    # engine/session setup is fully delegated to the shared db api module
    db.configure_db()
def create_vlanids():
    """Prepopulates the vlan_bindings table"""
    LOG.debug(_("create_vlanids() called"))
    session = db.get_session()
    try:
        # probe query: raises NoResultFound when the table is empty,
        # MultipleResultsFound when it is already populated
        vlanid = session.query(l2network_models.VlanID).one()
    except exc.MultipleResultsFound:
        # already populated -- nothing to do
        pass
    except exc.NoResultFound:
        # empty table: create one row per id in the configured range
        start = int(config.CISCO.vlan_start)
        end = int(config.CISCO.vlan_end)
        while start <= end:
            vlanid = l2network_models.VlanID(start)
            session.add(vlanid)
            start += 1
        session.flush()
    return
def get_all_vlanids():
    """Gets all the vlanids.

    Returns a (possibly empty) list of VlanID rows. Query.all() never
    raises NoResultFound, so the old try/except around it was dead code.
    """
    LOG.debug(_("get_all_vlanids() called"))
    session = db.get_session()
    return session.query(l2network_models.VlanID).all()
def is_vlanid_used(vlan_id):
    """Checks if a vlanid is in use"""
    LOG.debug(_("is_vlanid_used() called"))
    session = db.get_session()
    try:
        # .one() raises NoResultFound when the id does not exist
        vlanid = (session.query(l2network_models.VlanID).
                  filter_by(vlan_id=vlan_id).one())
        return vlanid["vlan_used"]
    except exc.NoResultFound:
        raise c_exc.VlanIDNotFound(vlan_id=vlan_id)
def release_vlanid(vlan_id):
    """Sets the vlanid state to be unused.

    Returns the (now False) vlan_used flag; raises VlanIDNotFound for an
    unknown id. The trailing bare ``return`` after the try/except was
    unreachable (both paths return or raise) and has been removed.
    """
    LOG.debug(_("release_vlanid() called"))
    session = db.get_session()
    try:
        vlanid = (session.query(l2network_models.VlanID).
                  filter_by(vlan_id=vlan_id).one())
        vlanid["vlan_used"] = False
        session.merge(vlanid)
        session.flush()
        return vlanid["vlan_used"]
    except exc.NoResultFound:
        raise c_exc.VlanIDNotFound(vlan_id=vlan_id)
def delete_vlanid(vlan_id):
    """Deletes a vlanid entry from db"""
    LOG.debug(_("delete_vlanid() called"))
    session = db.get_session()
    try:
        vlanid = (session.query(l2network_models.VlanID).
                  filter_by(vlan_id=vlan_id).one())
        session.delete(vlanid)
        session.flush()
        return vlanid
    except exc.NoResultFound:
        # unknown id: treated as already deleted (implicitly returns None)
        pass
def reserve_vlanid():
    """Reserves the first unused vlanid"""
    LOG.debug(_("reserve_vlanid() called"))
    session = db.get_session()
    try:
        # find any free id; first() returns None (does not raise), so an
        # empty result is normalised onto the NoResultFound path below
        rvlan = (session.query(l2network_models.VlanID).
                 filter_by(vlan_used=False).first())
        if not rvlan:
            raise exc.NoResultFound
        # re-fetch by id, mark it used and persist
        rvlanid = (session.query(l2network_models.VlanID).
                   filter_by(vlan_id=rvlan["vlan_id"]).one())
        rvlanid["vlan_used"] = True
        session.merge(rvlanid)
        session.flush()
        return rvlan["vlan_id"]
    except exc.NoResultFound:
        raise c_exc.VlanIDNotAvailable()
def get_all_vlanids_used():
    """Gets all the vlanids that are currently marked as used.

    Returns a (possibly empty) list of VlanID rows.
    """
    # fixed copy/pasted debug message (previously said "get_all_vlanids()")
    LOG.debug(_("get_all_vlanids_used() called"))
    session = db.get_session()
    # Query.all() never raises NoResultFound; the old except branch was dead
    return (session.query(l2network_models.VlanID).
            filter_by(vlan_used=True).all())
def get_all_vlan_bindings():
    """Lists all the vlan to network associations.

    Returns a (possibly empty) list of VlanBinding rows. Query.all() never
    raises NoResultFound, so the old try/except around it was dead code.
    """
    LOG.debug(_("get_all_vlan_bindings() called"))
    session = db.get_session()
    return session.query(l2network_models.VlanBinding).all()
def get_vlan_binding(netid):
    """Lists the vlan given a network_id"""
    LOG.debug(_("get_vlan_binding() called"))
    session = db.get_session()
    try:
        binding = (session.query(l2network_models.VlanBinding).
                   filter_by(network_id=netid).one())
        return binding
    except exc.NoResultFound:
        # no binding row for this network -> surface as NetworkNotFound
        raise q_exc.NetworkNotFound(net_id=netid)
def add_vlan_binding(vlanid, vlanname, netid):
    """Adds a vlan to network association"""
    LOG.debug(_("add_vlan_binding() called"))
    session = db.get_session()
    try:
        # EAFP: an existing row for this vlan means a duplicate binding
        binding = (session.query(l2network_models.VlanBinding).
                   filter_by(vlan_id=vlanid).one())
        raise c_exc.NetworkVlanBindingAlreadyExists(vlan_id=vlanid,
                                                    network_id=netid)
    except exc.NoResultFound:
        # no existing binding: create and persist it
        binding = l2network_models.VlanBinding(vlanid, vlanname, netid)
        session.add(binding)
        session.flush()
        return binding
def remove_vlan_binding(netid):
    """Removes a vlan to network association"""
    LOG.debug(_("remove_vlan_binding() called"))
    session = db.get_session()
    try:
        binding = (session.query(l2network_models.VlanBinding).
                   filter_by(network_id=netid).one())
        session.delete(binding)
        session.flush()
        return binding
    except exc.NoResultFound:
        # unknown network: treated as already removed (returns None)
        pass
def update_vlan_binding(netid, newvlanid=None, newvlanname=None):
    """Updates a vlan to network association"""
    LOG.debug(_("update_vlan_binding() called"))
    session = db.get_session()
    try:
        binding = (session.query(l2network_models.VlanBinding).
                   filter_by(network_id=netid).one())
        # only overwrite fields for which a truthy new value was supplied
        if newvlanid:
            binding["vlan_id"] = newvlanid
        if newvlanname:
            binding["vlan_name"] = newvlanname
        session.merge(binding)
        session.flush()
        return binding
    except exc.NoResultFound:
        raise q_exc.NetworkNotFound(net_id=netid)
def get_all_qoss(tenant_id):
    """Lists all the qos to tenant associations.

    Returns a (possibly empty) list of QoS rows for *tenant_id*.
    Query.all() never raises NoResultFound, so the old try/except was dead.
    """
    LOG.debug(_("get_all_qoss() called"))
    session = db.get_session()
    return (session.query(l2network_models.QoS).
            filter_by(tenant_id=tenant_id).all())
def get_qos(tenant_id, qos_id):
    """Lists the qos given a tenant_id and qos_id"""
    LOG.debug(_("get_qos() called"))
    session = db.get_session()
    try:
        qos = (session.query(l2network_models.QoS).
               filter_by(tenant_id=tenant_id).
               filter_by(qos_id=qos_id).one())
        return qos
    except exc.NoResultFound:
        raise c_exc.QosNotFound(qos_id=qos_id,
                                tenant_id=tenant_id)
def add_qos(tenant_id, qos_name, qos_desc):
    """Adds a qos to tenant association"""
    LOG.debug(_("add_qos() called"))
    session = db.get_session()
    try:
        # EAFP: an existing row with this name means a duplicate
        qos = (session.query(l2network_models.QoS).
               filter_by(tenant_id=tenant_id).
               filter_by(qos_name=qos_name).one())
        raise c_exc.QosNameAlreadyExists(qos_name=qos_name,
                                         tenant_id=tenant_id)
    except exc.NoResultFound:
        # no existing qos: create and persist it
        qos = l2network_models.QoS(tenant_id, qos_name, qos_desc)
        session.add(qos)
        session.flush()
        return qos
def remove_qos(tenant_id, qos_id):
    """Removes a qos to tenant association"""
    session = db.get_session()
    try:
        qos = (session.query(l2network_models.QoS).
               filter_by(tenant_id=tenant_id).
               filter_by(qos_id=qos_id).one())
        session.delete(qos)
        session.flush()
        return qos
    except exc.NoResultFound:
        # unknown qos: treated as already removed (returns None)
        pass
def update_qos(tenant_id, qos_id, new_qos_name=None):
    """Updates a qos to tenant association"""
    session = db.get_session()
    try:
        qos = (session.query(l2network_models.QoS).
               filter_by(tenant_id=tenant_id).
               filter_by(qos_id=qos_id).one())
        # only rename when a truthy new name was supplied
        if new_qos_name:
            qos["qos_name"] = new_qos_name
        session.merge(qos)
        session.flush()
        return qos
    except exc.NoResultFound:
        raise c_exc.QosNotFound(qos_id=qos_id,
                                tenant_id=tenant_id)
def get_all_credentials(tenant_id):
    """Lists all the creds for a tenant.

    Returns a (possibly empty) list of Credential rows. Query.all() never
    raises NoResultFound, so the old try/except was dead code.
    """
    session = db.get_session()
    return (session.query(l2network_models.Credential).
            filter_by(tenant_id=tenant_id).all())
def get_credential(tenant_id, credential_id):
    """Lists the creds for given a cred_id and tenant_id"""
    session = db.get_session()
    try:
        cred = (session.query(l2network_models.Credential).
                filter_by(tenant_id=tenant_id).
                filter_by(credential_id=credential_id).one())
        return cred
    except exc.NoResultFound:
        raise c_exc.CredentialNotFound(credential_id=credential_id,
                                       tenant_id=tenant_id)
def get_credential_name(tenant_id, credential_name):
    """Lists the creds for given a cred_name and tenant_id"""
    session = db.get_session()
    try:
        cred = (session.query(l2network_models.Credential).
                filter_by(tenant_id=tenant_id).
                filter_by(credential_name=credential_name).one())
        return cred
    except exc.NoResultFound:
        raise c_exc.CredentialNameNotFound(credential_name=credential_name,
                                           tenant_id=tenant_id)
def add_credential(tenant_id, credential_name, user_name, password):
    """Adds a credential for a tenant (docstring previously said "qos":
    copy/paste error)."""
    session = db.get_session()
    try:
        # EAFP: an existing row with this name means a duplicate
        cred = (session.query(l2network_models.Credential).
                filter_by(tenant_id=tenant_id).
                filter_by(credential_name=credential_name).one())
        raise c_exc.CredentialAlreadyExists(credential_name=credential_name,
                                            tenant_id=tenant_id)
    except exc.NoResultFound:
        # no existing credential: create and persist it
        cred = l2network_models.Credential(tenant_id, credential_name,
                                           user_name, password)
        session.add(cred)
        session.flush()
        return cred
def remove_credential(tenant_id, credential_id):
    """Removes a credential from a tenant"""
    session = db.get_session()
    try:
        cred = (session.query(l2network_models.Credential).
                filter_by(tenant_id=tenant_id).
                filter_by(credential_id=credential_id).one())
        session.delete(cred)
        session.flush()
        return cred
    except exc.NoResultFound:
        # unknown credential: treated as already removed (returns None)
        pass
def update_credential(tenant_id, credential_id,
                      new_user_name=None, new_password=None):
    """Updates a credential for a tenant"""
    session = db.get_session()
    try:
        cred = (session.query(l2network_models.Credential).
                filter_by(tenant_id=tenant_id).
                filter_by(credential_id=credential_id).one())
        # only overwrite fields for which a truthy new value was supplied
        if new_user_name:
            cred["user_name"] = new_user_name
        if new_password:
            cred["password"] = new_password
        session.merge(cred)
        session.flush()
        return cred
    except exc.NoResultFound:
        raise c_exc.CredentialNotFound(credential_id=credential_id,
                                       tenant_id=tenant_id)
|
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import inspect
import os
import argparse
import math
from os import listdir, remove, makedirs
from shutil import copyfile
from os.path import isdir, join, exists, split, relpath, splitext, abspath, commonprefix
from subprocess import Popen, PIPE, STDOUT, call
import json
from collections import OrderedDict
import logging
def compile_worker(job):
    """Run every command of a compile *job* and collect the outcomes.

    Returns a dict echoing the job's source/object/commands plus a
    'results' list with one {'code', 'output', 'command'} entry per command.
    """
    outcomes = []
    for command in job['commands']:
        try:
            _, stderr_text, exit_code = run_cmd(command, work_dir=job['work_dir'], chroot=job['chroot'])
        except KeyboardInterrupt:
            raise ToolException
        outcomes.append({
            'code': exit_code,
            'output': stderr_text,
            'command': command
        })
    return {
        'source': job['source'],
        'object': job['object'],
        'commands': job['commands'],
        'results': outcomes
    }
def cmd(l, check=True, verbose=False, shell=False, cwd=None):
    """Run command *l* (a list, or a string when shell=True); with *check*
    set, raise on a non-zero exit status. Python 2 only (print statement)."""
    text = l if shell else ' '.join(l)
    if verbose:
        print text
    rc = call(l, shell=shell, cwd=cwd)
    if check and rc != 0:
        raise Exception('ERROR %d: "%s"' % (rc, text))
def run_cmd(command, work_dir=None, chroot=None, redirect=False):
    """Run *command* and return (stdout, stderr, returncode).

    chroot   -- when set, re-run the command inside the given chroot; any
                occurrence of the chroot path is stripped from the args
                (paths become chroot-relative) and work_dir is ignored.
    redirect -- when True, stderr is merged into stdout.
    """
    if chroot:
        # Conventions managed by the web team for the mbed.org build system
        chroot_cmd = [
            '/usr/sbin/chroot', '--userspec=33:33', chroot
        ]
        for c in command:
            chroot_cmd += [c.replace(chroot, '')]
        logging.debug("Running command %s"%' '.join(chroot_cmd))
        command = chroot_cmd
        work_dir = None
    try:
        p = Popen(command, stdout=PIPE, stderr=STDOUT if redirect else PIPE, cwd=work_dir)
        _stdout, _stderr = p.communicate()
    except OSError as e:
        # typically: executable not found; report which command failed
        print "[OS ERROR] Command: "+(' '.join(command))
        raise
    return _stdout, _stderr, p.returncode
def run_cmd_ext(command):
    """Run *command* with stdout/stderr captured separately; returns
    (stdout, stderr, returncode). The executable must be resolvable."""
    assert is_cmd_valid(command[0])
    process = Popen(command, stdout=PIPE, stderr=PIPE)
    out, err = process.communicate()
    return out, err, process.returncode
def is_cmd_valid(cmd):
    """Return True when *cmd* resolves to an executable file; otherwise
    abort the process via error()."""
    caller = get_caller_name()
    resolved = find_cmd_abspath(cmd)
    if not resolved:
        error("%s: Command '%s' can't be found" % (caller, cmd))
    if not is_exec(resolved):
        error("%s: Command '%s' resolves to file '%s' which is not executable" % (caller, cmd, resolved))
    return True
def is_exec(path):
    """True when *path* (or its Windows '.exe' variant) is executable."""
    plain = os.access(path, os.X_OK)
    windows = os.access(path + '.exe', os.X_OK)
    return plain or windows
def find_cmd_abspath(cmd):
    """ Returns the absolute path to a command.
    None is returned if no absolute path was found.
    """
    # Direct hit: the path (or its Windows '.exe' variant) exists as given.
    if exists(cmd) or exists(cmd + '.exe'):
        return os.path.abspath(cmd)
    if 'PATH' not in os.environ:
        raise Exception("Can't find command path for current platform ('%s')" % sys.platform)
    # Otherwise scan every PATH entry; implicitly returns None on no match.
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = '%s/%s' % (directory, cmd)
        if exists(candidate) or exists(candidate + '.exe'):
            return candidate
def mkdir(path):
    """Create *path* (including parent directories) unless it exists."""
    if exists(path):
        return
    makedirs(path)
def copy_file(src, dst):
    """ Implement the behaviour of "shutil.copy(src, dst)" without copying the
    permissions (this was causing errors with directories mounted with samba)
    """
    # When dst is a directory, copy into it under the source file's name.
    if isdir(dst):
        dst = join(dst, split(src)[1])
    copyfile(src, dst)
def delete_dir_files(dir):
    """Remove every regular file directly inside *dir* (non-recursive);
    silently does nothing when the directory does not exist."""
    if not exists(dir):
        return
    for entry in listdir(dir):
        full = join(dir, entry)
        if not isdir(full):
            remove(full)
def get_caller_name(steps=2):
    """
    When called inside a function, it returns the name
    of the caller of that function.
    """
    # stack()[0] is this frame; element 3 of a stack record is the
    # function name, so steps=2 names the caller's caller.
    return inspect.stack()[steps][3]
def error(msg):
    """Print an ERROR-prefixed message and abort with exit status 1."""
    message = "ERROR: %s" % msg
    print(message)
    sys.exit(1)
def rel_path(path, base, dot=False):
    """Return *path* relative to *base*; with dot=True, prefix './' unless
    the result already starts with a dot."""
    result = relpath(path, base)
    if dot and not result.startswith('.'):
        result = './' + result
    return result
class ToolException(Exception):
    """Raised when a build command fails or is interrupted
    (see compile_worker)."""
    pass
class NotSupportedException(Exception):
    """Raised for operations that are not supported (no raiser visible in
    this module; presumably used by the build tools elsewhere)."""
    pass
def split_path(path):
    """Split *path* into (directory, basename-without-extension, extension)."""
    directory, filename = split(path)
    stem, extension = splitext(filename)
    return directory, stem, extension
def args_error(parser, message):
    """Print *message*, show the parser's help text, then exit.

    NOTE(review): sys.exit() with no argument exits with status 0, which
    may mask failure to calling scripts -- confirm that is intended.
    Python 2 only (print statement)."""
    print "\n\n%s\n\n" % message
    parser.print_help()
    sys.exit()
def construct_enum(**enums):
    """ Create your own pseudo-enums """
    # Materialise the keyword arguments as attributes of a fresh class.
    members = dict(enums)
    return type('Enum', (), members)
def check_required_modules(required_modules, verbose=True):
    """ Function checks for Python modules which should be "importable" (installed)
    before test suite can be used.
    @return returns True if all modules are installed already
    """
    import imp
    not_installed_modules = []
    for module_name in required_modules:
        try:
            imp.find_module(module_name)
        except ImportError as e:
            # We also test against a rare case: module is an egg file
            try:
                __import__(module_name)
            except ImportError as e:
                not_installed_modules.append(module_name)
                if verbose:
                    print "Error: %s" % e
    if verbose:
        if not_installed_modules:
            print "Warning: Module(s) %s not installed. Please install required module(s) before using this script."% (', '.join(not_installed_modules))
    # NOTE: Python 2 only (imp module, print statements)
    if not_installed_modules:
        return False
    else:
        return True
# Utility function: traverse a dictionary and change all the strings in the dictionary to
# ASCII from Unicode. Useful when reading ASCII JSON data, because the JSON decoder always
# returns Unicode string.
# Based on http://stackoverflow.com/a/13105359
def dict_to_ascii(input):
    """Recursively convert every Unicode string inside *input* (dicts,
    lists, scalars) to an ASCII byte string, preserving dict key order.

    Python 2 only (iteritems, unicode); raises UnicodeEncodeError for
    non-ASCII data. Based on http://stackoverflow.com/a/13105359
    """
    if isinstance(input, dict):
        return OrderedDict([(dict_to_ascii(key), dict_to_ascii(value)) for key, value in input.iteritems()])
    elif isinstance(input, list):
        return [dict_to_ascii(element) for element in input]
    elif isinstance(input, unicode):
        return input.encode('ascii')
    else:
        # ints, bools, None, already-byte strings pass through unchanged
        return input
# Read a JSON file and return its Python representation, transforming all the strings from Unicode
# to ASCII. The order of keys in the JSON file is preserved.
def json_file_to_dict(fname):
    """Read the JSON file *fname*, preserving key order and converting all
    strings to ASCII; on a parse/IO error, note the file name on stderr and
    re-raise."""
    try:
        with open(fname, "rt") as fobj:
            return dict_to_ascii(json.load(fobj, object_pairs_hook=OrderedDict))
    except (ValueError, IOError):
        sys.stderr.write("Error parsing '%s':\n" % fname)
        raise
# Factory of factories: argparse_type(case, hyphen) -> middle(list, name) -> type callable
def argparse_type(casedness, prefer_hyphen=False) :
    """Build a factory of argparse 'type' callables that validate membership
    in a list of options, suggesting the correct casing/hyphenation when
    that is the only difference."""
    def middle(list, type_name):
        def parse_type(string):
            # Normalise case and hyphen/underscore style per preference.
            if prefer_hyphen:
                normalised = casedness(string).replace("_", "-")
            else:
                normalised = casedness(string).replace("-", "_")
            if string in list:
                return string
            if string not in list and normalised in list:
                raise argparse.ArgumentTypeError("{0} is not a supported {1}. Did you mean {2}?".format(string, type_name, normalised))
            raise argparse.ArgumentTypeError("{0} is not a supported {1}. Supported {1}s are:\n{2}".format(string, type_name, columnate(list)))
        return parse_type
    return middle
# Shortcuts for the usual argparse_type configurations: case-normalising
# validators preferring underscores (default) or hyphens in suggestions.
argparse_uppercase_type = argparse_type(str.upper, False)
argparse_lowercase_type = argparse_type(str.lower, False)
argparse_uppercase_hyphen_type = argparse_type(str.upper, True)
argparse_lowercase_hyphen_type = argparse_type(str.lower, True)
def argparse_force_type(case):
    """Build a factory of argparse 'type' callables that match an option
    case-insensitively (after applying *case*) and return the canonical
    spelling from the allowed list."""
    def middle(list, type_name):
        def parse_type(string):
            for option in list:
                if case(string) == case(option):
                    return option
            raise argparse.ArgumentTypeError("{0} is not a supported {1}. Supported {1}s are:\n{2}".format(string, type_name, columnate(list)))
        return parse_type
    return middle
# These two convert the argument's case *before* validation and return the
# canonical spelling from the allowed list.
argparse_force_uppercase_type = argparse_force_type(str.upper)
argparse_force_lowercase_type = argparse_force_type(str.lower)
# An argument parser combinator that takes in an argument parser and creates a new parser that
# accepts a comma separated list of the same thing.
def argparse_many(fn):
    """Lift parser *fn* over comma-separated lists: the returned parser
    applies *fn* to every comma-separated element of its input."""
    def wrap(string):
        return [fn(piece) for piece in string.split(",")]
    return wrap
# An argument parser that verifies that a string passed in corresponds to a file
def argparse_filestring_type(string) :
    """argparse 'type' that accepts only paths existing on the filesystem."""
    if exists(string) :
        return string
    raise argparse.ArgumentTypeError("{0} does not exist in the filesystem.".format(string))
# render a list of strings as a in a bunch of columns
def columnate(strings, seperator=", ", chars=80):
    """Render *strings* in equal-width columns, keeping each rendered line
    at most roughly *chars* wide; entries are joined by *seperator*."""
    widest = max(len(entry) for entry in strings)
    cell = widest + len(seperator)
    per_line = math.floor(chars / cell)
    rendered = ""
    for index, entry in enumerate(strings):
        piece = entry
        if index != len(strings) - 1:
            piece += seperator
        # break the line after every per_line-th entry, else pad the cell
        if index % per_line == per_line - 1:
            piece += "\n"
        else:
            piece = piece.ljust(cell)
        rendered += piece
    return rendered
# fail if argument provided is a parent of the specified directory
def argparse_dir_not_parent(other):
    """argparse 'type' factory rejecting any directory that is a parent of
    (or equal to) *other*; otherwise the argument is returned unchanged."""
    def parse_type(not_parent):
        abs_other = abspath(other)
        abs_not_parent = abspath(not_parent)
        is_parent = abs_not_parent == commonprefix([abs_not_parent, abs_other])
        if is_parent:
            raise argparse.ArgumentTypeError("{0} may not be a parent directory of {1}".format(not_parent, other))
        return not_parent
    return parse_type
|
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import VCoinTestFramework
from test_framework.util import *
from test_framework.comptool import wait_until
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    """Bare-bones p2p peer: forwards test-generated messages to the node
    under test and records blocks/pongs/disconnects it receives."""
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()
        # block sha256 -> number of times that block was received
        self.block_receive_map = {}
    def add_connection(self, conn):
        self.connection = conn
        self.peer_disconnected = False
    def on_inv(self, conn, message):
        # inv announcements are irrelevant to this test
        pass
    # Track the last getdata message we receive (used in the test)
    def on_getdata(self, conn, message):
        self.last_getdata = message
    def on_block(self, conn, message):
        # count every delivery of each block by hash
        message.block.calc_sha256()
        try:
            self.block_receive_map[message.block.sha256] += 1
        except KeyError as e:
            self.block_receive_map[message.block.sha256] = 1
    # Spin until verack message is received from the node.
    # We use this to signal that our test can begin. This
    # is called from the testing thread, so it needs to acquire
    # the global lock.
    def wait_for_verack(self):
        def veracked():
            return self.verack_received
        return wait_until(veracked, timeout=10)
    def wait_for_disconnect(self):
        def disconnected():
            return self.peer_disconnected
        return wait_until(disconnected, timeout=10)
    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)
    def on_pong(self, conn, message):
        self.last_pong = message
    def on_close(self, conn):
        self.peer_disconnected = True
    # Sync up with the node after delivery of a block
    def sync_with_ping(self, timeout=30):
        """Send a ping and wait for the matching pong; False on timeout."""
        def received_pong():
            return (self.last_pong.nonce == self.ping_counter)
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        success = wait_until(received_pong, timeout)
        self.ping_counter += 1
        return success
class MaxUploadTest(VCoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("VCOIND", "vcoind"),
help="vcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourself after account for a fee
# And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * 1000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available / old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
    # Entry point: run the max-upload-target functional test directly.
    MaxUploadTest().main()
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functionality related to notifications common to multiple layers of
the system.
"""
import datetime
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import timeutils
import six
import nova.context
from nova import db
from nova.i18n import _LE
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.objects import base as obj_base
from nova.openstack.common import context as common_context
from nova.openstack.common import log
from nova import rpc
from nova import utils
# Module-level logger for this notifications module.
LOG = log.getLogger(__name__)

# Configuration options controlling which notifications this module emits
# and how they are published.
notify_opts = [
    cfg.StrOpt('notify_on_state_change',
        help='If set, send compute.instance.update notifications on instance '
             'state changes. Valid values are None for no notifications, '
             '"vm_state" for notifications on VM state changes, or '
             '"vm_and_task_state" for notifications on VM and task state '
             'changes.'),
    cfg.BoolOpt('notify_api_faults', default=False,
        help='If set, send api.fault notifications on caught exceptions '
             'in the API service.'),
    cfg.StrOpt('default_notification_level',
               default='INFO',
               help='Default notification level for outgoing notifications'),
    cfg.StrOpt('default_publisher_id',
               help='Default publisher_id for outgoing notifications'),
]

CONF = cfg.CONF
# Register the options so they are resolvable via CONF.<option_name>.
CONF.register_opts(notify_opts)
def notify_decorator(name, fn):
    """Decorator for notify which is used from utils.monkey_patch().

    :param name: name of the function
    :param fn: - object of the function
    :returns: fn -- decorated function
    """
    def wrapped_func(*args, **kwarg):
        # Record the call's positional and keyword arguments in the
        # notification body.
        body = {}
        body['args'] = []
        body['kwarg'] = {}
        for arg in args:
            body['args'].append(arg)
        for key in kwarg:
            body['kwarg'][key] = kwarg[key]

        ctxt = common_context.get_context_from_function_and_args(
            fn, args, kwarg)

        notifier = rpc.get_notifier('api',
                                    publisher_id=(CONF.default_publisher_id
                                                  or CONF.host))
        # BUG FIX: the notifier object has no `getattr` method, so the
        # previous `notifier.getattr(...)` raised AttributeError at call
        # time.  Use the builtin getattr() to look up the method for the
        # configured level, falling back to `info` for unknown levels.
        method = getattr(notifier, CONF.default_notification_level.lower(),
                         notifier.info)
        method(ctxt, name, body)

        # The decorated function's behavior is unchanged.
        return fn(*args, **kwarg)
    return wrapped_func
def send_api_fault(url, status, exception):
    """Send an api.fault notification."""
    # Fault notifications are opt-in; do nothing unless enabled.
    if not CONF.notify_api_faults:
        return

    payload = {
        'url': url,
        'exception': six.text_type(exception),
        'status': status,
    }
    notifier = rpc.get_notifier('api')
    notifier.error(None, 'api.fault', payload)
def send_update(context, old_instance, new_instance, service="compute",
                host=None):
    """Send compute.instance.update notification to report any changes occurred
    in that instance
    """
    if not CONF.notify_on_state_change:
        # updates are disabled entirely, nothing to do
        return

    old_vm_state = old_instance["vm_state"]
    new_vm_state = new_instance["vm_state"]
    old_task_state = old_instance["task_state"]
    new_task_state = new_instance["task_state"]

    # Decide whether this update must carry state-transition details.
    vm_state_changed = old_vm_state != new_vm_state
    track_task_state = (
        CONF.notify_on_state_change.lower() == "vm_and_task_state")
    task_state_changed = old_task_state != new_task_state

    if vm_state_changed or (track_task_state and task_state_changed):
        # Send a notification with state changes.  verify_states stays at
        # its default (False) because the state comparison was done here.
        send_update_with_states(context, new_instance, old_vm_state,
                                new_vm_state, old_task_state, new_task_state,
                                service, host)
    else:
        try:
            # Include the previous display name only when it changed.
            old_display_name = None
            if new_instance["display_name"] != old_instance["display_name"]:
                old_display_name = old_instance["display_name"]
            _send_instance_update_notification(
                context, new_instance, service=service, host=host,
                old_display_name=old_display_name)
        except Exception:
            LOG.exception(_LE("Failed to send state update notification"),
                          instance=new_instance)
def send_update_with_states(context, instance, old_vm_state, new_vm_state,
        old_task_state, new_task_state, service="compute", host=None,
        verify_states=False):
    """Send compute.instance.update notification to report changes if there
    are any, in the instance
    """
    if not CONF.notify_on_state_change:
        # skip all this if updates are disabled
        return

    if verify_states:
        # Only notify when the vm state (or, when so configured, the task
        # state) actually transitioned.
        fire_update = (
            old_vm_state != new_vm_state
            or (CONF.notify_on_state_change.lower() == "vm_and_task_state"
                and old_task_state != new_task_state))
    else:
        # send update notification by default
        fire_update = True

    if not fire_update:
        return

    try:
        # send either a state change or a regular notification
        _send_instance_update_notification(
            context, instance,
            old_vm_state=old_vm_state, old_task_state=old_task_state,
            new_vm_state=new_vm_state, new_task_state=new_task_state,
            service=service, host=host)
    except Exception:
        LOG.exception(_LE("Failed to send state update notification"),
                      instance=instance)
def _compute_states_payload(instance, old_vm_state=None,
        old_task_state=None, new_vm_state=None, new_task_state=None):
    # If the states were not specified we assume the current instance
    # states are the correct information. This is important to do for
    # both old and new states because otherwise we create some really
    # confusing notifications like:
    #
    #   None(None) => Building(none)
    #
    # When we really were just continuing to build
    def pick(value, key):
        # Fall back to the instance's current value when not specified.
        return instance[key] if value is None else value

    return {
        "old_state": pick(old_vm_state, "vm_state"),
        "state": pick(new_vm_state, "vm_state"),
        "old_task_state": pick(old_task_state, "task_state"),
        "new_task_state": pick(new_task_state, "task_state"),
    }
def _send_instance_update_notification(context, instance, old_vm_state=None,
        old_task_state=None, new_vm_state=None, new_task_state=None,
        service="compute", host=None, old_display_name=None):
    """Send 'compute.instance.update' notification to inform observers
    about instance state changes.
    """
    payload = info_from_instance(context, instance, None, None)

    # Describe the state transition (falls back to current states when a
    # state was not supplied).
    states = _compute_states_payload(instance, old_vm_state, old_task_state,
                                     new_vm_state, new_task_state)
    payload.update(states)

    # add audit fields:
    audit_start, audit_end = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    payload["bandwidth"] = bandwidth_usage(instance, audit_start)

    # add old display name if it is changed
    if old_display_name:
        payload["old_display_name"] = old_display_name

    notifier = rpc.get_notifier(service, host)
    notifier.info(context, 'compute.instance.update', payload)
def audit_period_bounds(current_period=False):
    """Get the start and end of the relevant audit usage period

    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.
    """
    begin, end = utils.last_completed_audit_period()
    if current_period:
        # The current period starts where the last completed one ended
        # and runs until "now".
        return (end, timeutils.utcnow())
    return (begin, end)
def bandwidth_usage(instance_ref, audit_start,
                    ignore_missing_network_data=True):
    """Get bandwidth usage information for the instance for the
    specified audit period.

    :param instance_ref: instance dict or NovaObject to report usage for
    :param audit_start: start of the audit period passed to the bw-usage
        DB query
    :param ignore_missing_network_data: if True, a failure to fetch
        network info is logged and swallowed instead of re-raised
    :returns: dict mapping network label (or a
        'net-name-not-found-<mac>' placeholder) to
        {'bw_in': ..., 'bw_out': ...}
    """
    admin_context = nova.context.get_admin_context(read_deleted='yes')

    def _get_nwinfo_old_skool():
        """Support for getting network info without objects."""
        # Prefer the cached network info attached to the instance dict.
        if (instance_ref.get('info_cache') and
                instance_ref['info_cache'].get('network_info') is not None):
            cached_info = instance_ref['info_cache']['network_info']
            if isinstance(cached_info, network_model.NetworkInfo):
                return cached_info
            return network_model.NetworkInfo.hydrate(cached_info)
        try:
            return network.API().get_instance_nw_info(admin_context,
                                                      instance_ref)
        except Exception:
            try:
                # save_and_reraise_exception logs and then re-raises the
                # original exception when its block exits.
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Failed to get nw_info'),
                                  instance=instance_ref)
            except Exception:
                # The re-raised exception lands here; optionally swallow it.
                # NOTE(review): this path returns None, and iterating
                # `nw_info` below would then raise TypeError — confirm
                # whether callers ever hit this branch.
                if ignore_missing_network_data:
                    return
                raise

    # FIXME(comstud): Temporary as we transition to objects.
    if isinstance(instance_ref, obj_base.NovaObject):
        nw_info = instance_ref.info_cache.network_info
        if nw_info is None:
            nw_info = network_model.NetworkInfo()
    else:
        nw_info = _get_nwinfo_old_skool()

    # Only report usage rows whose MAC belongs to this instance's VIFs.
    macs = [vif['address'] for vif in nw_info]
    uuids = [instance_ref["uuid"]]
    bw_usages = db.bw_usage_get_by_uuids(admin_context, uuids, audit_start)
    bw_usages = [b for b in bw_usages if b.mac in macs]
    bw = {}
    for b in bw_usages:
        # Fallback label when no VIF matches the usage row's MAC.
        label = 'net-name-not-found-%s' % b['mac']
        for vif in nw_info:
            if vif['address'] == b['mac']:
                label = vif['network']['label']
                break
        bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
    return bw
def image_meta(system_metadata):
    """Format image metadata for use in notifications from the instance
    system metadata.

    :param system_metadata: mapping of instance system-metadata entries;
        keys prefixed with ``image_`` describe the image properties.
    :returns: dict of the ``image_``-prefixed entries with the prefix
        stripped from each key.
    """
    image_properties = {}
    # Use items() instead of the Python-2-only iteritems() so this also
    # runs under Python 3 (items() is valid on both).
    for md_key, md_value in system_metadata.items():
        if md_key.startswith('image_'):
            # strip the 'image_' prefix (6 characters)
            image_properties[md_key[6:]] = md_value
    return image_properties
def info_from_instance(context, instance, network_info,
                       system_metadata, **kw):
    """Get detailed instance information for an instance which is common to all
    notifications.

    :param:instance: nova.objects.Instance
    :param:network_info: network_info provided if not None
    :param:system_metadata: system_metadata DB entries for the instance,
        if not None

    .. note::

        Currently unused here in trunk, but needed for potential custom
        modifications.
        NOTE(review): this function reads ``instance.system_metadata``
        directly below and never uses the ``system_metadata`` parameter —
        confirm whether the parameter is kept only for interface
        compatibility.
    """
    def null_safe_str(s):
        # Empty string for any falsy value (None, '', 0, ...).
        return str(s) if s else ''

    def null_safe_int(s):
        # NOTE(review): falsy values — including a legitimate 0 — become
        # the empty string, not 0; presumably for payload compatibility.
        return int(s) if s else ''

    def null_safe_isotime(s):
        # Datetimes are formatted via timeutils; everything else falls
        # back to the null_safe_str behavior.
        if isinstance(s, datetime.datetime):
            return timeutils.strtime(s)
        else:
            return str(s) if s else ''

    image_ref_url = glance.generate_image_url(instance.image_ref)

    instance_type = instance.get_flavor()
    instance_type_name = instance_type.get('name', '')
    instance_flavorid = instance_type.get('flavorid', '')

    instance_info = dict(
        # Owner properties
        tenant_id=instance.project_id,
        user_id=instance.user_id,
        # Identity properties
        instance_id=instance.uuid,
        display_name=instance.display_name,
        reservation_id=instance.reservation_id,
        hostname=instance.hostname,
        # Type properties
        instance_type=instance_type_name,
        instance_type_id=instance.instance_type_id,
        instance_flavor_id=instance_flavorid,
        architecture=instance.architecture,
        # Capacity properties
        memory_mb=instance.memory_mb,
        disk_gb=instance.root_gb + instance.ephemeral_gb,
        vcpus=instance.vcpus,
        # Note(dhellmann): This makes the disk_gb value redundant, but
        # we are keeping it for backwards-compatibility with existing
        # users of notifications.
        root_gb=instance.root_gb,
        ephemeral_gb=instance.ephemeral_gb,
        # Location properties
        host=instance.host,
        node=instance.node,
        availability_zone=instance.availability_zone,
        cell_name=null_safe_str(instance.cell_name),
        # Date properties
        created_at=str(instance.created_at),
        # Terminated and Deleted are slightly different (although being
        # terminated and not deleted is a transient state), so include
        # both and let the recipient decide which they want to use.
        terminated_at=null_safe_isotime(instance.get('terminated_at', None)),
        deleted_at=null_safe_isotime(instance.get('deleted_at', None)),
        launched_at=null_safe_isotime(instance.get('launched_at', None)),
        # Image properties
        image_ref_url=image_ref_url,
        os_type=instance.os_type,
        kernel_id=instance.kernel_id,
        ramdisk_id=instance.ramdisk_id,
        # Status properties
        state=instance.vm_state,
        state_description=null_safe_str(instance.task_state),
        progress=null_safe_int(instance.progress),
        # accessIPs
        access_ip_v4=instance.access_ip_v4,
        access_ip_v6=instance.access_ip_v6,
        )

    if network_info is not None:
        # Flatten every VIF's fixed IPs, tagging each with its network
        # label and the VIF MAC address.
        fixed_ips = []
        for vif in network_info:
            for ip in vif.fixed_ips():
                ip["label"] = vif["network"]["label"]
                ip["vif_mac"] = vif["address"]
                fixed_ips.append(ip)
        instance_info['fixed_ips'] = fixed_ips

    # add image metadata
    image_meta_props = image_meta(instance.system_metadata)
    instance_info["image_meta"] = image_meta_props

    # add instance metadata
    instance_info['metadata'] = instance.metadata

    # Callers may inject extra payload fields via **kw (may overwrite
    # the defaults above).
    instance_info.update(kw)
    return instance_info
|
|
'''
Proto-Beads & their filesystem layout
'''
import os
import zipfile
from . import layouts
from . import meta
from . import tech
from .bead import Bead
# technology modules
# Short aliases for the pluggable technology helpers used throughout
# this module.
persistence = tech.persistence
securehash = tech.securehash
fs = tech.fs

# generated with `uuidgen -t`
# Format-version marker embedded in every packed bead's meta data
# (see _ZipCreator.add_meta).
META_VERSION = 'aaa947a6-1f7a-11e6-ba3a-0021cc73492e'
class Workspace(Bead):
    '''
    A directory on disk with the layout of a bead under development.

    Provides access to the bead meta data stored in the directory and
    operations to create the layout, manage inputs, and pack the
    workspace into an archive.
    '''

    # absolute path of the workspace directory (a tech.fs.Path)
    directory = None

    def __init__(self, directory):
        self.directory = fs.Path(os.path.abspath(directory))

    @property
    def is_valid(self):
        # A valid workspace has all standard directories and a meta file.
        directory = self.directory
        return all(
            (
                os.path.isdir(directory / layouts.Workspace.INPUT),
                os.path.isdir(directory / layouts.Workspace.OUTPUT),
                os.path.isdir(directory / layouts.Workspace.TEMP),
                os.path.isfile(directory / layouts.Workspace.BEAD_META)))

    @property
    def _meta_filename(self):
        return self.directory / layouts.Workspace.BEAD_META

    @property
    def meta(self):
        # Loaded from disk on every access, so external changes are seen.
        return persistence.file_load(self._meta_filename)

    @meta.setter
    def meta(self, meta):
        persistence.file_dump(meta, self._meta_filename)

    # Bead properties
    @property
    def kind(self):
        return self.meta[meta.KIND]

    @property
    def name(self):
        return os.path.basename(self.directory)

    @property
    def inputs(self):
        return tuple(meta.parse_inputs(self.meta))

    # faked Bead properties
    @property
    def content_id(self):
        # note, that it is not a valid, unique
        # content_id for referencing
        # however it is easily recognisable on graphs
        return f'<WORKSPACE {self.directory}>'

    @property
    def freeze_time_str(self):
        return tech.timestamp.timestamp()

    @property
    def box_name(self):
        return '<UNSAVED>'

    # workspace constructors
    def create(self, kind):
        '''
        Set up an empty project structure.

        Works with either an empty directory or a directory to be created.
        '''
        directory = self.directory
        assert not os.path.exists(directory)
        self.create_directories()

        bead_meta = {
            meta.KIND: kind,
            meta.INPUTS: {}}
        fs.write_file(
            directory / layouts.Workspace.BEAD_META,
            persistence.dumps(bead_meta))
        assert self.is_valid

    def create_directories(self):
        '''
        Create the standard workspace directory layout.
        '''
        directory = self.directory
        fs.ensure_directory(directory)
        fs.ensure_directory(directory / layouts.Workspace.INPUT)
        # inputs are managed exclusively through load()/unload()
        fs.make_readonly(directory / layouts.Workspace.INPUT)
        fs.ensure_directory(directory / layouts.Workspace.OUTPUT)
        fs.ensure_directory(directory / layouts.Workspace.TEMP)
        fs.ensure_directory(directory / layouts.Workspace.META)

    def pack(self, zipfilename, freeze_time, comment):
        '''
        Create archive from workspace.
        '''
        assert not os.path.exists(zipfilename)

        try:
            _ZipCreator().create(zipfilename, self, freeze_time, comment)
        except Exception:
            # was `except (RuntimeError, Exception)` — RuntimeError is a
            # subclass of Exception, so the tuple was redundant.
            # Do not leave a partial archive behind.
            if os.path.exists(zipfilename):
                os.remove(zipfilename)
            raise

    def has_input(self, input_nick):
        '''
        Is there an input defined for input_nick?

        NOTE: it is not necessarily loaded!
        '''
        return input_nick in self.meta[meta.INPUTS]

    def is_loaded(self, input_nick):
        return os.path.isdir(
            self.directory / layouts.Workspace.INPUT / input_nick)

    def add_input(self, input_nick, kind, content_id, freeze_time_str):
        # Record the input in the meta file (read-modify-write).
        m = self.meta
        m[meta.INPUTS][input_nick] = {
            meta.INPUT_KIND: kind,
            meta.INPUT_CONTENT_ID: content_id,
            meta.INPUT_FREEZE_TIME: freeze_time_str}
        self.meta = m

    def delete_input(self, input_nick):
        assert self.has_input(input_nick)
        if self.is_loaded(input_nick):
            self.unload(input_nick)
        m = self.meta
        del m[meta.INPUTS][input_nick]
        self.meta = m

    @property
    def _input_map_filename(self):
        return self.directory / layouts.Workspace.INPUT_MAP

    @property
    def input_map(self):
        """
        Map from local (bead specific) input nicks to real (more widely recognised) bead names
        """
        try:
            return persistence.file_load(self._input_map_filename)
        except Exception:
            # The input map file is optional — treat a missing or unreadable
            # file as an empty map.  Narrowed from a bare `except:`, which
            # would also have swallowed KeyboardInterrupt/SystemExit.
            return {}

    @input_map.setter
    def input_map(self, input_map):
        persistence.file_dump(input_map, self._input_map_filename)

    def get_input_bead_name(self, input_nick):
        '''
        Returns the name on which update works.
        '''
        return self.input_map.get(input_nick, input_nick)

    def set_input_bead_name(self, input_nick, bead_name):
        '''
        Sets the name to be used for updates in the future.
        '''
        input_map = self.input_map
        input_map[input_nick] = bead_name
        self.input_map = input_map

    def load(self, input_nick, bead):
        '''
        Make output data files in bead available under input directory
        '''
        input_dir = self.directory / layouts.Workspace.INPUT
        fs.make_writable(input_dir)
        try:
            self.add_input(
                input_nick,
                bead.kind, bead.content_id, bead.freeze_time_str)
            destination_dir = input_dir / input_nick
            bead.unpack_data_to(destination_dir)
            # Loaded input data is reference material — make it read-only.
            for f in fs.all_subpaths(destination_dir):
                fs.make_readonly(f)
        finally:
            fs.make_readonly(input_dir)

    def unload(self, input_nick):
        '''
        Remove files for given input
        '''
        assert self.has_input(input_nick)
        input_dir = self.directory / layouts.Workspace.INPUT
        fs.make_writable(input_dir)
        try:
            fs.rmtree(input_dir / input_nick)
        finally:
            fs.make_readonly(input_dir)

    def __repr__(self):
        # default values are printed as repr of the value
        return self.directory

    @classmethod
    def for_current_working_directory(cls):
        '''
        Create Workspace based on current working directory.

        Determine the correct Workspace for the current working directory.
        As a result, the returned workspace may be for a parent directory,
        if the cwd is under a valid workspace, but not at its root.

        Can return an invalid Workspace.
        '''
        cwd = cls(os.getcwd())
        ws = cwd
        while not ws.is_valid:
            # NOTE(review): relies on fs.Path equality/normalization so that
            # the filesystem root's parent compares equal to itself —
            # confirm in tech.fs, otherwise this loop may not terminate.
            parent = ws.directory / '..'
            if parent == ws.directory:
                return cwd
            ws = cls(parent)
        return ws
class _ZipCreator:
    '''
    Packs a workspace into a bead zip archive, recording a secure hash
    for every stored entry so a manifest can be embedded in the archive.
    '''

    def __init__(self):
        # zip-internal path -> secure hash of the stored content
        self.hashes = {}
        # open zipfile.ZipFile while create() is running, else None
        self.zipfile = None

    def add_hash(self, path, content_hash):
        # renamed parameter `hash` -> `content_hash`: avoid shadowing the
        # builtin hash()
        assert path not in self.hashes
        self.hashes[path] = content_hash

    def add_file(self, path, zip_path):
        self.zipfile.write(path, zip_path)
        self.add_hash(
            zip_path,
            securehash.file(open(path, 'rb'), os.path.getsize(path)))

    def add_path(self, path, zip_path):
        # Add a file, or a whole directory tree, under zip_path.
        if os.path.isdir(path):
            self.add_directory(path, zip_path)
        else:
            assert os.path.isfile(path), '%s is neither a file nor a directory' % path
            self.add_file(path, zip_path)

    def add_directory(self, path, zip_path):
        for f in os.listdir(path):
            self.add_path(path / f, zip_path / f)

    def add_string_content(self, zip_path, string):
        # renamed local `bytes` -> `data`: avoid shadowing the builtin
        data = string.encode('utf-8')
        self.zipfile.writestr(zip_path, data)
        self.add_hash(zip_path, securehash.bytes(data))

    def create(self, zip_file_name, workspace, timestamp, comment):
        '''
        Write the complete archive for workspace to zip_file_name.
        '''
        assert workspace.is_valid
        # Compression can be tuned via the BEAD_ZIP_COMPRESSION env var;
        # unknown/unset values fall back to deflate.
        user_compression_preference = os.environ.get('BEAD_ZIP_COMPRESSION')
        compression = {
            'off': zipfile.ZIP_STORED,
            'stored': zipfile.ZIP_STORED,
            # these are not universally supported compression methods
            # 'lzma': zipfile.ZIP_LZMA,
            # 'bz2': zipfile.ZIP_BZIP2,
            'deflated': zipfile.ZIP_DEFLATED,
        }.get(user_compression_preference, zipfile.ZIP_DEFLATED)
        try:
            with zipfile.ZipFile(
                zip_file_name,
                mode='w',
                compression=compression,
                allowZip64=True,
            ) as self.zipfile:
                self.zipfile.comment = comment.encode('utf-8')
                self.add_data(workspace)
                self.add_code(workspace)
                self.add_meta(workspace, timestamp)
        finally:
            # never keep a reference to the (now closed) zipfile
            self.zipfile = None

    def add_code(self, workspace):
        source_directory = workspace.directory

        def is_code(f):
            # everything except the managed directories counts as code
            return f not in {
                layouts.Workspace.INPUT,
                layouts.Workspace.OUTPUT,
                layouts.Workspace.META,
                layouts.Workspace.TEMP}

        for f in sorted(os.listdir(source_directory)):
            if is_code(f):
                self.add_path(
                    source_directory / f,
                    layouts.Archive.CODE / f)

    def add_data(self, workspace):
        self.add_directory(
            workspace.directory / layouts.Workspace.OUTPUT,
            layouts.Archive.DATA)

    def add_meta(self, workspace, timestamp):
        # renamed comprehension variable `input` -> `inp`: avoid shadowing
        # the builtin input()
        bead_meta = {
            meta.META_VERSION: META_VERSION,
            meta.KIND: workspace.kind,
            meta.FREEZE_TIME: timestamp,
            meta.INPUTS: {
                inp.name: {
                    meta.INPUT_KIND: inp.kind,
                    meta.INPUT_CONTENT_ID: inp.content_id,
                    meta.INPUT_FREEZE_TIME: inp.freeze_time_str}
                for inp in workspace.inputs},
            meta.FREEZE_NAME: workspace.name}

        self.add_string_content(layouts.Archive.BEAD_META, persistence.dumps(bead_meta))
        self.add_string_content(layouts.Archive.MANIFEST, persistence.dumps(self.hashes))
        persistence.zip_dump(workspace.input_map, self.zipfile, layouts.Archive.INPUT_MAP)
|
|
"""Utility functions that don't fit in the main modules"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
def connect_nfldb():
    """Connect to the nfldb database.

    Rather than using the builtin method we make our own,
    since we're going to use SQLAlchemy as the engine. However,
    we can still make use of the information in the nfldb config
    file to get information like username and password, which
    means this function doesn't need any arguments.

    Parameters
    ----------
    None

    Returns
    -------
    SQLAlchemy engine object
        A connected engine, ready to be used to query the DB.

    Raises
    ------
    IOError
        If it can't find the config file.
    """
    import nfldb
    import sqlalchemy as sql

    db_config, paths_tried = nfldb.db.config()
    if db_config is None:
        raise IOError("get_play_data: could not find database config! Looked"
                      " in these places: {0}".format(paths_tried))

    # Translate the nfldb config keys into SQLAlchemy engine-URL keywords.
    db_config["drivername"] = "postgres"
    db_config["username"] = db_config.pop("user")
    del db_config["timezone"]

    url = sql.engine.url.URL(**db_config)
    return sql.create_engine(url)
def get_nfldb_play_data(season_years=None, season_types=("Regular", "Postseason")):
    """Get play-by-play data from the nfldb database.

    We use a specialized query and then postprocessing because, while possible to
    do using the objects created by ``nfldb``, it is *orders of magnitude slower*.
    This is due to the more general nature of ``nfldb``, which is not really designed
    for this kind of data mining. Since we need to get a lot of data in a single way,
    it's much simpler to interact at a lower level with the underlying postgres
    database.

    Parameters
    ----------
    season_years : list (default=None)
        A list of all years to get data for (earliest year in nfldb is 2009).
        If ``None``, get data from all available seasons.
    season_types : sequence (default=("Regular", "Postseason"))
        All parts of seasons to get data for (acceptable values are
        "Preseason", "Regular", and "Postseason"). If ``None``, get data from
        all three season types. The default is a tuple rather than a list to
        avoid the mutable-default-argument pitfall; any sequence of strings
        is accepted.

    Returns
    -------
    Pandas DataFrame
        The play by play data, with the following columns:

        * **gsis_id:** The official NFL GSIS_ID for the game.
        * **drive_id:** The id of the drive, starts at 1 and increases by 1 for each new drive.
        * **play_id:** The id of the play in ``nfldb``. Note that sequential plays have
          increasing but not necessarily sequential values. With ``drive_id`` and ``gsis_id``,
          works as a unique identifier for a given play.
        * **quarter:** The quarter, prepended with "Q" (e.g. ``Q1`` means the first quarter).
          Overtime periods are denoted as ``OT``, ``OT2``, and theoretically ``OT3`` if one were to
          ever be played.
        * **seconds_elapsed:** seconds elapsed since the start of the quarter.
        * **offense_team:** The abbreviation of the team currently with possession of the ball.
        * **yardline:** The current field position. Goes from -49 to 49, where negative numbers
          indicate that the team with possession is on its own side of the field.
        * **down:** The down. kickoffs, extra points, and similar have a down of 0.
        * **yards_to_go:** How many yards needed in order to get a first down (or touchdown).
        * **home_team:** The abbreviation of the home team.
        * **away_team:** The abbreviation of the away team.
        * **curr_home_score:** The home team's score at the start of the play.
        * **curr_away_score:** The away team's score at the start of the play.
        * **offense_won:** A boolean - ``True`` if the offense won the game, ``False`` otherwise. (The
          database query skips tied games.)

    Notes
    -----
    ``gsis_id``, ``drive_id``, and ``play_id`` are not necessary to make the model, but
    are included because they can be useful for computing things like WPA.
    """
    engine = connect_nfldb()

    sql_string = _make_nfldb_query_string(season_years=season_years,
                                          season_types=season_types)
    plays_df = pd.read_sql(sql_string, engine)

    # Fix yardline, quarter and time elapsed:
    def yardline_time_fix(row):
        # yardline arrives as a parenthesized string like '(12.0)';
        # missing values raise TypeError on slicing and become NaN.
        try:
            yardline = float(row['yardline'][1:-1])
        except TypeError:
            yardline = np.nan
        # time arrives as '(Q1,130)' -> quarter 'Q1', 130 seconds elapsed
        split_time = row['time'].split(",")
        return yardline, split_time[0][1:], float(split_time[1][:-1])

    plays_df[['yardline', 'quarter', 'seconds_elapsed']] = pd.DataFrame(
        plays_df.apply(yardline_time_fix, axis=1).values.tolist())
    plays_df.drop('time', axis=1, inplace=True)

    # Set NaN downs (kickoffs, etc) to 0:
    plays_df['down'] = plays_df['down'].fillna(value=0).astype(np.int8)

    # Aggregate scores:
    plays_df = _aggregate_nfldb_scores(plays_df)

    return plays_df
def _aggregate_nfldb_scores(play_df):
    """Aggregate the raw nfldb data to get the score of every play.

    Adds ``curr_home_score`` and ``curr_away_score`` columns (the score at
    the *start* of each play) and drops the raw per-play scoring columns.
    The frame is modified in place and also returned.
    """
    # First, add the yardline of the subsequent play to the df
    play_df['next_yardline'] = play_df['yardline'].shift(-1)

    # Running-score state shared by the row-wise closure below.
    # (The previous version also kept unused module-level copies of these
    # values alongside the dict; they have been removed.)
    argdict = {"curr_home_score": 0,
               "curr_away_score": 0,
               "curr_gsis_id": play_df.iloc[0].gsis_id}

    # Define an internal function to actually compute the score of a given play:
    def compute_current_scores(play, argdict):
        # If new game, set scores to zero:
        if play.gsis_id != argdict['curr_gsis_id']:
            argdict['curr_home_score'] = 0
            argdict['curr_away_score'] = 0
            argdict['curr_gsis_id'] = play.gsis_id

        # Get current score at start of play:
        home_score_to_return = argdict['curr_home_score']
        away_score_to_return = argdict['curr_away_score']

        # Check if an extra point is missing from the data: a TD followed
        # by a snap on the defense's side of the field implies a made XP.
        if play.offense_play_points == 6 and play.next_yardline < 0:
            play.offense_play_points += 1
        if play.defense_play_points == 6 and play.next_yardline < 0:
            play.defense_play_points += 1

        # Update scores, if necessary:
        if play.offense_team == play.home_team:
            argdict['curr_home_score'] += play.offense_play_points
            argdict['curr_away_score'] += play.defense_play_points
        else:
            argdict['curr_home_score'] += play.defense_play_points
            argdict['curr_away_score'] += play.offense_play_points

        return home_score_to_return, away_score_to_return

    # Apply function to data:
    # TODO (AndrewRook): Make the .apply function go faster, currently it's a large bottleneck
    aggregate_scores = play_df.apply(compute_current_scores, axis=1, args=(argdict,))
    aggregate_scores = pd.DataFrame(aggregate_scores.values.tolist())
    play_df[['curr_home_score', 'curr_away_score']] = aggregate_scores

    # Drop unnecessary columns:
    play_df.drop(labels=["next_yardline", "offense_play_points", "defense_play_points"],
                 axis=1, inplace=True)
    return play_df
def _make_nfldb_query_string(season_years=None, season_types=None):
    """Construct the query string to get all the play data.

    This way is a little more compact and robust than specifying
    the string in the function that uses it.
    """
    play_fields = ['gsis_id', 'drive_id', 'play_id',
                   'time', 'pos_team AS offense_team', 'yardline', 'down',
                   'yards_to_go']

    # Points scored by the possessing team on a single play.
    offense_play_points = ("GREATEST("
        "(agg_play.fumbles_rec_tds * 6), "
        "(agg_play.kicking_rec_tds * 6), "
        "(agg_play.passing_tds * 6), "
        "(agg_play.receiving_tds * 6), "
        "(agg_play.rushing_tds * 6), "
        "(agg_play.kicking_xpmade * 1), "
        "(agg_play.passing_twoptm * 2), "
        "(agg_play.receiving_twoptm * 2), "
        "(agg_play.rushing_twoptm * 2), "
        "(agg_play.kicking_fgm * 3)) "
        "AS offense_play_points")

    # Points scored by the defending team on a single play.
    defense_play_points = ("GREATEST("
        "(agg_play.defense_frec_tds * 6), "
        "(agg_play.defense_int_tds * 6), "
        "(agg_play.defense_misc_tds * 6), "
        "(agg_play.kickret_tds * 6), "
        "(agg_play.puntret_tds * 6), "
        "(agg_play.defense_safe * 2)) "
        "AS defense_play_points")

    game_fields = ("game.home_team, game.away_team, "
        "((game.home_score > game.away_score AND play.pos_team = game.home_team) "
        "OR (game.away_score > game.home_score AND play.pos_team = game.away_team)) AS offense_won")

    # Base filter: finished, non-tied games and real plays only.
    where_parts = ["WHERE game.home_score != game.away_score "
                   "AND game.finished = TRUE "
                   "AND play.pos_team != 'UNK' "
                   "AND (play.time).phase not in ('Pregame', 'Half', 'Final')"]
    if season_years is not None:
        if len(season_years) == 1:
            year_filter = " = {0}".format(season_years[0])
        else:
            year_filter = " in ({0})".format(
                ",".join([str(year) for year in season_years]))
        where_parts.append(" AND game.season_year" + year_filter)
    if season_types is not None:
        if len(season_types) == 1:
            type_filter = " = '{0}'".format(season_types[0])
        else:
            type_filter = " in ('{0}')".format("','".join(season_types))
        where_parts.append(" AND game.season_type" + type_filter)
    where_clause = "".join(where_parts)

    # Assemble the final statement from its fragments.
    pieces = [
        "SELECT ",
        "play." + ", play.".join(play_fields),
        ", " + offense_play_points,
        ", " + defense_play_points,
        ", " + game_fields,
        " FROM play INNER JOIN agg_play",
        " ON play.gsis_id = agg_play.gsis_id"
        " AND play.drive_id = agg_play.drive_id"
        " AND play.play_id = agg_play.play_id",
        " INNER JOIN game on play.gsis_id = game.gsis_id",
        " " + where_clause,
        " ORDER BY play.gsis_id, play.drive_id, play.play_id;",
    ]
    return "".join(pieces)
|
|
import os
from os.path import sep, normpath, join, exists
import ntpath
import copy
from collections import namedtuple
import shutil
from subprocess import Popen, PIPE
import re
from tools.arm_pack_manager import Cache
from tools.targets import TARGET_MAP
from tools.export.exporters import Exporter
from tools.export.cmsis import DeviceCMSIS
cache_d = False
class DeviceUvision(DeviceCMSIS):
    """Uvision Device class, inherits CMSIS Device class

    Encapsulates information necessary for uvision project targets
    """

    def __init__(self, target):
        DeviceCMSIS.__init__(self, target)
        dev_format = "$$Device:{0}${1}"
        # SVD (register description) reference, empty when unavailable
        self.svd = ''
        if self.debug_svd:
            self.svd = dev_format.format(self.dname, self.debug_svd)
        self.reg_file = dev_format.format(self.dname, self.compile_header)
        self.debug_interface = self.uv_debug()
        self.flash_dll = self.generate_flash_dll()

    def uv_debug(self):
        """Return a namedtuple of information about uvision debug settings"""
        UVDebug = namedtuple('UVDebug', ['bin_loc', 'core_flag', 'key'])

        # CortexMXn => pCMX
        cpu = self.core.replace("Cortex-", "C")
        cpu = cpu.replace("+", "")
        cpu = cpu.replace("F", "")
        cpu_flag = "p" + cpu

        # Locations found in Keil_v5/TOOLS.INI
        debuggers = {"st-link": ('STLink\\ST-LINKIII-KEIL_SWO.dll', 'ST-LINKIII-KEIL_SWO'),
                     "j-link": ('Segger\\JL2CM3.dll', 'JL2CM3'),
                     "cmsis-dap": ('BIN\\CMSIS_AGDI.dll', 'CMSIS_AGDI'),
                     "nulink": ('NULink\\Nu_Link.dll', 'Nu_Link')}
        res = debuggers[self.debug.lower()]
        binary = res[0]
        key = res[1]

        return UVDebug(binary, cpu_flag, key)

    def generate_flash_dll(self):
        '''Flash DLL string from uvision
        S = SW/JTAG Clock ID
        C = CPU index in JTAG chain
        P = Access Port
        For the Options for Target -> Debug tab -> settings -> "Flash" tab in the dialog:
        FD = RAM Start for Flash Functions
        FC = RAM Size for Flash Functions
        FN = Number of Flash types
        FF = Flash File Name (without an extension)
        FS = Start Address of the Flash Device
        FL = Size of the Flash Device
        FP = Full path to the Device algorithm (RTE)

        Necessary to flash some targets. Info gathered from algorithms field of pdsc file.
        '''
        fl_count = 0

        def get_mem_no_x(mem_str):
            # Strip the leading '0x' from a hex literal like '0x20000000'.
            # Raw string fixes the invalid '\d'/'\w' escape sequences in the
            # previous non-raw literal (a DeprecationWarning, and a syntax
            # error in future Python versions).
            mem_reg = r"\dx(\w+)"
            m = re.search(mem_reg, mem_str)
            return m.group(1) if m else None

        RAMS = [(get_mem_no_x(info["start"]), get_mem_no_x(info["size"]))
                for mem, info in self.target_info["memory"].items() if "RAM" in mem]

        format_str = "UL2CM3(-S0 -C0 -P0 -FD{ramstart}" + " -FC{ramsize} " + "-FN{num_algos} {extra_flags})"
        ramstart = ''
        # Default according to Keil developer
        ramsize = '1000'
        if len(RAMS) >= 1:
            ramstart = RAMS[0][0]

        extra_flags = []
        for name, info in self.target_info["algorithm"].items():
            if not name or not info:
                continue
            if int(info["default"]) == 0:
                continue
            # raw string: same escape-sequence fix as above
            name_reg = r"\w*/([\w_]+)\.flm"
            m = re.search(name_reg, name.lower())
            # NOTE(review): if the algorithm name does not match, fl_name is
            # None and the concatenation below would raise TypeError —
            # presumably pdsc algorithm names always end in '.flm'; confirm.
            fl_name = m.group(1) if m else None
            name_flag = "-FF" + str(fl_count) + fl_name

            start, size = get_mem_no_x(info["start"]), get_mem_no_x(info["size"])
            rom_start_flag = "-FS" + str(fl_count) + str(start)
            rom_size_flag = "-FL" + str(fl_count) + str(size)

            if info["ramstart"] is not None and info["ramsize"] is not None:
                ramstart = get_mem_no_x(info["ramstart"])
                ramsize = get_mem_no_x(info["ramsize"])

            path_flag = "-FP" + str(fl_count) + "($$Device:" + self.dname + "$" + name + ")"

            extra_flags.extend([name_flag, rom_start_flag, rom_size_flag, path_flag])
            fl_count += 1

        extra = " ".join(extra_flags)
        return format_str.format(ramstart=ramstart,
                                 ramsize=ramsize,
                                 extra_flags=extra, num_algos=fl_count)
class Uvision(Exporter):
    """Keil Uvision class

    This class encapsulates information to be contained in a Uvision
    project file (.uvprojx).
    The needed information can be viewed in uvision.tmpl
    """
    NAME = 'uvision5'
    TOOLCHAIN = 'ARM'
    TARGETS = []
    # Only targets with an ARM toolchain, a device name, and CMSIS pack
    # support are exportable.  .items() (instead of py2-only .iteritems())
    # behaves identically on Python 2 and also works on Python 3.
    for target, obj in TARGET_MAP.items():
        if not ("ARM" in obj.supported_toolchains and hasattr(obj, "device_name")):
            continue
        if not DeviceCMSIS.check_supported(target):
            continue
        TARGETS.append(target)

    # File associations within .uvprojx file
    file_types = {'.cpp': 8, '.c': 1, '.s': 2,
                  '.obj': 3, '.o': 3, '.lib': 4,
                  '.ar': 4, '.h': 5, '.hpp': 5, '.sct': 4}

    def uv_files(self, files):
        """A generator containing Uvision specific information about project files

        Positional Arguments:
        files - the location of source files

        .uvprojx XML for project file:
        <File>
            <FileType>{{file.type}}</FileType>
            <FileName>{{file.name}}</FileName>
            <FilePath>{{file.loc}}</FilePath>
        </File>
        """
        # Hoisted out of the loop: the namedtuple type is loop-invariant.
        UVFile = namedtuple('UVFile', ['type', 'loc', 'name'])
        for loc in files:
            # Encapsulates the information necessary for the template entry above
            _, ext = os.path.splitext(loc)
            if ext.lower() in self.file_types:
                # 'file_type' avoids shadowing the builtin 'type'
                file_type = self.file_types[ext.lower()]
                name = ntpath.basename(normpath(loc))
                yield UVFile(file_type, loc, name)

    def format_flags(self):
        """Format toolchain flags for Uvision"""
        flags = copy.deepcopy(self.flags)
        # to be preprocessed with armcc
        asm_flag_string = '--cpreproc --cpreproc_opts=-D__ASSERT_MSG,' + \
                          ",".join(flags['asm_flags'])
        flags['asm_flags'] = asm_flag_string
        # All non-asm flags are in one template field
        c_flags = list(set(flags['c_flags'] + flags['cxx_flags'] + flags['common_flags']))
        # These flags are in template to be set by user in IDE
        template = ["--no_vla", "--cpp", "--c99"]
        # Flag is invalid if set in template
        # Optimizations are also set in the template
        invalid_flag = lambda x: x in template or re.match(r"-O(\d|time)", x)
        flags['c_flags'] = [flag for flag in c_flags if not invalid_flag(flag)]
        flags['c_flags'] = " ".join(flags['c_flags'])
        return flags

    def format_src(self, srcs):
        """Make sources into the named tuple for use in the template"""
        grouped = self.group_project_files(srcs)
        for group, files in grouped.items():
            # Attribute access replaces the py2-only tuple-unpacking lambda
            # 'lambda (_, __, name): ...' with identical ordering.
            grouped[group] = sorted(list(self.uv_files(files)),
                                    key=lambda uv_file: uv_file.name.lower())
        return grouped

    @staticmethod
    def format_fpu(core):
        """Generate a core's FPU string

        Returns "FPU3(DFPU)" for double-precision cores (suffix "FD"),
        "FPU2" for single-precision cores (suffix "F"), "" otherwise.
        """
        if core.endswith("FD"):
            return "FPU3(DFPU)"
        elif core.endswith("F"):
            return "FPU2"
        else:
            return ""

    def generate(self):
        """Generate the .uvproj file"""
        cache = Cache(True, False)
        if cache_d:
            cache.cache_descriptors()

        srcs = self.resources.headers + self.resources.s_sources + \
               self.resources.c_sources + self.resources.cpp_sources + \
               self.resources.objects + self.resources.libraries
        ctx = {
            'name': self.project_name,
            # project_files => dict of generators - file group to generator of
            # UVFile tuples defined above
            'project_files': sorted(list(self.format_src(srcs).items()),
                                    key=lambda group_files: group_files[0].lower()),
            'linker_script': self.resources.linker_script,
            'include_paths': '; '.join(self.resources.inc_dirs).encode('utf-8'),
            'device': DeviceUvision(self.target),
        }
        core = ctx['device'].core
        ctx['cputype'] = core.rstrip("FD")
        # Turn on FPU optimizations if the core has an FPU
        ctx['fpu_setting'] = 1 if 'F' not in core or 'D' in core else 2
        ctx['fputype'] = self.format_fpu(core)
        ctx.update(self.format_flags())
        self.gen_file('uvision/uvision.tmpl', ctx, self.project_name + ".uvprojx")
        self.gen_file('uvision/uvision_debug.tmpl', ctx, self.project_name + ".uvoptx")

    @staticmethod
    def build(project_name, log_name='build_log.txt', cleanup=True):
        """ Build Uvision project """
        # > UV4 -r -j0 -o [log_name] [project_name].uvprojx
        proj_file = project_name + ".uvprojx"
        cmd = ['UV4', '-r', '-j0', '-o', log_name, proj_file]

        # Build the project
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        ret_code = p.returncode

        # Print the log file to stdout.  print(...) with a single argument
        # behaves identically under the py2 print statement and py3.
        with open(log_name, 'r') as f:
            print(f.read())

        # Cleanup the exported and built files
        if cleanup:
            os.remove(log_name)
            os.remove(project_name + ".uvprojx")
            os.remove(project_name + ".uvoptx")
            # legacy .build directory cleaned if exists
            if exists('.build'):
                shutil.rmtree('.build')
            if exists('BUILD'):
                shutil.rmtree('BUILD')

        # Returns 0 upon success, 1 upon a warning, and neither upon an error
        if ret_code != 0 and ret_code != 1:
            # Seems like something went wrong.
            return -1
        return 0
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DenseNet-BC 121/169/201 + composable (2016)
# Trainable params: 7,976,808
# Paper: https://arxiv.org/pdf/1608.06993.pdf
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, AveragePooling2D, Concatenate
from tensorflow.keras.layers import Activation
from tensorflow.keras.regularizers import l2
import sys
sys.path.append('../')
from models_c import Composable
class DenseNet(Composable):
    """ Construct a Densely Connected Convolution Neural Network """
    # Meta-parameter: number of residual blocks in each dense group
    groups = { 121 : [ { 'n_blocks': 6 }, { 'n_blocks': 12 }, { 'n_blocks': 24 }, { 'n_blocks': 16 } ], # DenseNet 121
               169 : [ { 'n_blocks': 6 }, { 'n_blocks': 12 }, { 'n_blocks': 32 }, { 'n_blocks': 32 } ], # DenseNet 169
               201 : [ { 'n_blocks': 6 }, { 'n_blocks': 12 }, { 'n_blocks': 48 }, { 'n_blocks': 32 } ]  # DenseNet 201
             }

    # Meta-parameter: amount to reduce feature maps by (compression factor) during transition blocks
    reduction = 0.5

    # Meta-parameter: number of filters in a convolution block within a residual block (growth rate)
    n_filters = 32

    # Initial Hyperparameters (may be overridden via **hyperparameters in __init__)
    hyperparameters = { 'initializer': 'he_normal',
                        'regularizer': l2(0.001),
                        'relu_clip'  : None,
                        'bn_epsilon' : None,
                        'use_bias'   : False
                      }

    def __init__(self, n_layers, n_filters=32, reduction=0.5,
                 input_shape=(224, 224, 3), n_classes=1000, include_top=True,
                 **hyperparameters):
        """ Construct a Densely Connected Convolution Neural Network
            n_layers    : number of layers (one of 121/169/201) or a
                          user-defined list of {'n_blocks': int} group dicts
            n_filters   : number of filters (growth rate)
            reduction   : amount to reduce feature maps by (compression factor)
            input_shape : input shape
            n_classes   : number of output classes
            include_top : whether to include the classifier
            regularizer : kernel regularizer
            initializer : kernel initializer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top, self.hyperparameters, **hyperparameters)

        # predefined
        if isinstance(n_layers, int):
            if n_layers not in [121, 169, 201]:
                raise Exception("DenseNet: Invalid value for n_layers")
            # Copied so learner()'s pop() does not mutate the class attribute.
            groups = list(self.groups[n_layers])
        # user defined
        else:
            # NOTE(review): a user-supplied list is popped (mutated) inside
            # learner() -- confirm callers do not reuse the list afterwards.
            groups = n_layers

        # The input vector
        inputs = Input(shape=input_shape)

        # The Stem Convolution Group
        x = self.stem(inputs, n_filters)

        # The Learner
        outputs = self.learner(x, n_filters=n_filters, reduction=reduction, groups=groups)

        # The Classifier
        if include_top:
            # Add hidden dropout layer
            outputs = self.classifier(outputs, n_classes, dropout=0.1)

        # Instantiate the model
        self._model = Model(inputs, outputs)

    def stem(self, inputs, n_filters):
        """ Construct the Stem Convolution Group
            inputs   : input tensor
            n_filters: number of filters for the dense blocks (k)
        """
        # Pads input from 224x224 to 230x230
        x = ZeroPadding2D(padding=((3, 3), (3, 3)))(inputs)

        # First large convolution for abstract features for input 224 x 224 and output 112 x 112
        # Stem convolution uses 2 * k (growth rate) number of filters
        # NOTE(review): self.Conv2D / self.BatchNormalization / self.ReLU are
        # Composable wrappers -- presumably they apply the configured
        # initializer/regularizer/relu_clip; confirm in models_c.
        x = self.Conv2D(x, 2 * n_filters, (7, 7), strides=(2, 2))
        x = self.BatchNormalization(x)
        x = self.ReLU(x)

        # Add padding so when downsampling we fit shape 56 x 56
        x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = MaxPooling2D((3, 3), strides=2)(x)
        return x

    def learner(self, x, **metaparameters):
        """ Construct the Learner
            x     : input to the learner
            groups: set of number of blocks per group
        """
        groups = metaparameters['groups']

        # pop off the list the last dense block
        last = groups.pop()

        # Create the dense groups and interceding transition blocks
        for group in groups:
            x = self.group(x, **group, **metaparameters)

        # Add the last dense group w/o a following transition block
        # (reduction=None suppresses the transition block in group()).
        metaparameters['reduction'] = None
        x = self.group(x, **last, **metaparameters)
        return x

    def group(self, x, **metaparameters):
        """ Construct a Dense Group
            x        : input to the group
            n_blocks : number of residual blocks in dense group
            reduction: amount to reduce (compress) feature maps by
        """
        n_blocks = metaparameters['n_blocks']
        reduction = metaparameters['reduction']
        # Removed so trans_block() below is not passed 'reduction' twice.
        del metaparameters['reduction']

        # Construct a group of residual blocks
        for _ in range(n_blocks):
            x = self.residual_block(x, **metaparameters)

        # Construct interceding transition block
        if reduction is not None:
            x = self.trans_block(x, reduction=reduction, **metaparameters)
        return x

    def residual_block(self, x, **metaparameters):
        """ Construct a Residual Block
            x        : input to the block
            n_filters: number of filters in convolution layer in residual block
        """
        if 'n_filters' in metaparameters:
            n_filters = metaparameters['n_filters']
            # Removed -- presumably so 'n_filters' is not forwarded into the
            # Conv2D wrapper calls below; confirm the wrapper's signature.
            del metaparameters['n_filters']
        else:
            n_filters = self.n_filters

        # Remember input tensor into residual block
        shortcut = x

        # BN-RE-Conv pre-activation form of convolutions

        # Dimensionality expansion, expand filters by 4 (DenseNet-B)
        x = self.BatchNormalization(x)
        x = self.ReLU(x)
        x = self.Conv2D(x, 4 * n_filters, (1, 1), strides=(1, 1), **metaparameters)

        # Bottleneck convolution
        # 3x3 convolution with padding=same to preserve same shape of feature maps
        x = self.BatchNormalization(x)
        x = self.ReLU(x)
        x = self.Conv2D(x, n_filters, (3, 3), strides=(1, 1), padding='same',
                        **metaparameters)

        # Concatenate the input (identity) with the output of the residual block
        # Concatenation (vs. merging) provides Feature Reuse between layers
        x = Concatenate()([shortcut, x])
        return x

    def trans_block(self, x, **metaparameters):
        """ Construct a Transition Block
            x        : input layer
            reduction: percentage of reduction of feature maps
        """
        if 'reduction' in metaparameters:
            reduction = metaparameters['reduction']
        else:
            reduction = self.reduction
        # NOTE(review): unconditional delete -- raises KeyError if callers do
        # not pass 'n_filters'; group() always forwards it today.
        del metaparameters['n_filters']

        # Reduce (compress) the number of feature maps (DenseNet-C)
        # shape[n] returns a class object. We use int() to cast it into the dimension size
        n_filters = int( int(x.shape[3]) * reduction)

        # BN-LI-Conv pre-activation form of convolutions
        # Use 1x1 linear projection convolution
        x = self.BatchNormalization(x)
        x = self.Conv2D(x, n_filters, (1, 1), strides=(1, 1), **metaparameters)

        # Use mean value (average) instead of max value sampling when pooling reduce by 75%
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        return x
# Example
# densenet = DenseNet(121)
def example():
    """Build a mini-DenseNet for CIFAR-10, print its summary and train it."""
    # Three dense groups of increasing depth (same shape as the original).
    mini_groups = [{'n_blocks': n} for n in (3, 6, 12)]
    densenet_model = DenseNet(mini_groups, input_shape=(32, 32, 3), n_classes=10)
    densenet_model.model.summary()
    densenet_model.cifar10()
|
|
import csv
import math
import random
# This is an implementation of the Naive Bayes Algorithm.
# The dataset we are using is comprised of 768 observations of medical
# details for Pima Native American patients.
"""
Pima Native American dataset: https://archive.ics.uci.edu/ml/datasets/pima+indians+diabetes
Row schema:
1. Number of times pregnant.
2. Plasma glucose concentration after 2 hours in an oral glucose tolerance test.
3. Diastolic blood pressure (mm Hg).
4. Triceps skin fold thickness (mm).
5. 2-Hour serum insulin (mu U/ml).
6. Body mass index (weight in kg / (height in m) ^ 2).
7. Diabetes pedigree function.
8. Age (years).
9. Class variable (0 or 1) where 1 == "tested positive for diabetes".
"""
# Path to the Pima dataset CSV file (all-numeric rows, class in last column).
FILENAME = 'pima-data.csv'
# Fraction of rows assigned to the training split.
SPLIT_RATIO = 0.67
""" Step 1: Handle data -- split into training and test datasets.
"""
# Load data.
def load_csv(filename):
    """Load a CSV dataset and convert every attribute to float.

    Args:
        filename (str): Path to a CSV file where every field is numeric.

    Returns a list of rows, each row a list of floats.
    """
    # Use a context manager so the file handle is always closed (the
    # original leaked it).  Text mode also keeps the csv module working
    # on Python 3 (the original opened in "rb").
    with open(filename, "r") as csv_file:
        dataset = list(csv.reader(csv_file))
    # Convert attributes to floats.
    for i in range(len(dataset)):
        dataset[i] = [float(attr) for attr in dataset[i]]
    return dataset
# Split dataset.
def split_dataset(dataset, split_ratio):
    """Randomly split *dataset* into training and testing subsets.

    Args:
        dataset (list): List of data entries.
        split_ratio (float): Fraction of the dataset used for training.

    Returns a tuple of the form (training_set, testing_set).
    """
    remaining = list(dataset)          # testing pool; shrinks as we pick
    n_train = int(len(dataset) * split_ratio)
    picked = []
    while len(picked) < n_train:
        # Move one randomly chosen entry from the testing pool to training.
        picked.append(remaining.pop(random.randrange(len(remaining))))
    return (picked, remaining)
""" Step 2: Summarize Data
Naive Bayes model is comprised of a summary of the data in the training
dataset; we will need this summary when we go to make predictions.
The summary of the training data involves the mean and standard deviation for
each attribute, by class value. For instance, if there are two class values
and 7 numerical attributes, then we need a mean and standard deviation for
each attribute (7) and class value (2), that is 14 attribute summaries.
We will then use this summarized data to calculate the probability of a
specific attribute value belonging to each class value.
"""
# Separate data by class.
def separate_by_class(dataset):
    """Group rows by class value (assumed to be the last field of each row).

    Args:
        dataset (list): List of data entries.

    Returns a dictionary mapping each class value to its list of rows.
    """
    by_class = {}
    for entry in dataset:
        by_class.setdefault(entry[-1], []).append(entry)
    return by_class
# Calculate mean.
def mean(numbers):
    """Return the arithmetic mean of *numbers* as a float."""
    return float(sum(numbers)) / len(numbers)
# Calculate standard deviation.
def standard_deviation(numbers):
    """Return the sample standard deviation of *numbers*.

    The square root of the variance, where the variance is the average of
    the squared differences of each datapoint from the mean.  Uses the
    N - 1 method (Bessel's correction), so it divides by len(numbers) - 1.

    Args:
        numbers (list): Dataset to calculate the standard deviation for.

    Returns a float representing the standard deviation of the numbers arg.
    """
    # Inlined mean: sum / float(len), identical to the module-level helper.
    avg = sum(numbers) / float(len(numbers))
    variance = sum((x - avg) ** 2 for x in numbers) / float(len(numbers) - 1)
    return math.sqrt(variance)
# Summarize dataset.
def summarize(dataset):
    """Compute (mean, standard_deviation) for every attribute column.

    Args:
        dataset (list): List of data rows; the last column holds the class
            label and is excluded from the result.

    Returns a list of (mean, std_dev) tuples, one per attribute.
    """
    columns = zip(*dataset)
    stats = [(mean(column), standard_deviation(column)) for column in columns]
    # Drop the final entry: it summarizes the class labels, not an attribute.
    return stats[:-1]
# Summarize attributes by class.
def summarize_by_class(dataset):
    """Summarize the attributes of *dataset* separately for each class value.

    Returns a dict mapping each class value to its list of per-attribute
    (mean, std_dev) tuples.
    """
    return dict(
        (label, summarize(rows))
        for label, rows in separate_by_class(dataset).items()
    )
"""Step 3: Make prediction.
To make a prediction, we will calculate the probability that a given data
instance belongs to each class, then select the class with the largest
probability as the prediction.
"""
# Calculate Gaussian Probability Density Function.
# We use the Gaussian function to estimate the probability of a given
# attribute value, given the known mean and standard deviation for the
# attribute estimated from the training data.
# The result is the conditional probability of a given attribute value
# given a class value.
def calculate_gaussian_probability(val, mean, standard_deviation):
    """Gaussian probability density of *val* under N(mean, standard_deviation).

    Used as the conditional probability of an attribute value given a class,
    with the class's per-attribute mean and standard deviation estimated
    from the training data.
    """
    variance = math.pow(standard_deviation, 2)
    exponent = math.exp(-math.pow(val - mean, 2) / (2 * variance))
    coefficient = 1 / (math.sqrt(2 * math.pi) * standard_deviation)
    return coefficient * exponent
# Calculate Class Probabilities.
def calculate_class_probabilities(summaries, input_vector):
    """Probability of *input_vector* belonging to each class.

    Multiplies together the Gaussian probabilities of every attribute value
    under the class's per-attribute (mean, std_dev) summary -- the "naive"
    independence assumption.

    Returns a dictionary mapping class values to probabilities.
    """
    probabilities = {}
    for label, attribute_stats in summaries.items():
        likelihood = 1  # running product over the attributes
        for index, (attr_mean, attr_std) in enumerate(attribute_stats):
            likelihood *= calculate_gaussian_probability(
                input_vector[index], attr_mean, attr_std)
        probabilities[label] = likelihood
    return probabilities
# Make a prediction.
def predict(summaries, input_vector):
    """Return the class whose probability for *input_vector* is largest.

    Ties keep the first class encountered (strict > comparison).
    """
    probabilities = calculate_class_probabilities(summaries, input_vector)
    best_label, best_probability = None, None
    for label, probability in probabilities.items():
        if best_label is None or probability > best_probability:
            best_label, best_probability = label, probability
    return best_label
def get_predictions(summaries, test_set):
    """Predict a class label for every row of *test_set*.

    Used to measure the accuracy of the model.
    """
    return [predict(summaries, entry) for entry in test_set]
# Estimate Accuracy.
def get_accuracy(test_set, predictions):
    """Return the percentage of rows whose actual class (last field)
    matches the corresponding entry in *predictions*.

    Bug fix: the original divided by ``len(testSet)`` -- a module-level
    global -- instead of the ``test_set`` parameter, so it only worked by
    accident when a global with that exact name happened to exist.
    """
    correct = 0
    for row in range(len(test_set)):
        if test_set[row][-1] == predictions[row]:
            correct += 1
    return (correct / float(len(test_set))) * 100.0
if __name__ == "__main__":
    # Step-by-step smoke tests of each helper, then the full pipeline.
    # NOTE(review): this block is Python 2 only ('print len(...)' statement;
    # "print('...').format(x)" parses as print(str.format(x)) on py2 but
    # would call .format on None under py3).
    dataset = load_csv(FILENAME)
    print len(dataset)
    (training, testing) = split_dataset(dataset, SPLIT_RATIO)
    print(len(training), len(testing))
    # Step 2 demo: class separation.
    dataset = [[1,20,1], [2,21,0], [3,22,1]]
    separated = separate_by_class(dataset)
    print('Separated instances: {0}').format(separated)
    # Mean / standard deviation demo.
    data = [1, 2, 3, 4, 5]
    avg = mean(data)
    print(avg)
    std_dev = standard_deviation(data)
    print(std_dev)
    # Summaries demo (last column is the class, so it is excluded).
    dataset = [[1,20,0], [2,21,1], [3,22,0]]
    summary = summarize(dataset)
    print('Attribute summaries: {0}').format(summary)
    dataset = [[1,20,1], [2,21,0], [3,22,1], [4,22,0]]
    summary = summarize_by_class(dataset)
    print('Summary by class value: {0}').format(summary)
    # Gaussian PDF demo.
    val = 71.5
    # NOTE(review): this rebinding shadows the module-level mean() function
    # for the rest of this block.
    mean = 73
    stdev = 6.2
    probability = calculate_gaussian_probability(val, mean, stdev)
    print('Probability of belonging to this class: {0}').format(probability)
    # Class-probability and prediction demos.
    summaries = {0:[(1, 0.5)], 1:[(20, 5.0)]}
    inputVector = [1.1, '?']
    probabilities = calculate_class_probabilities(summaries, inputVector)
    print('Probabilities for each class: {0}').format(probabilities)
    summaries = {'A':[(1, 0.5)], 'B':[(20, 5.0)]}
    inputVector = [1.1, '?']
    result = predict(summaries, inputVector)
    print('Prediction: {0}').format(result)
    summaries = {'A':[(1, 0.5)], 'B':[(20, 5.0)]}
    testSet = [[1.1, '?'], [19.1, '?']]
    predictions = get_predictions(summaries, testSet)
    print('Predictions: {0}').format(predictions)
    # NOTE(review): get_accuracy (as originally written) reads the global
    # 'testSet' rather than its parameter -- this demo works only because
    # the names coincide.
    testSet = [[1,1,1,'a'], [2,2,2,'a'], [3,3,3,'b']]
    predictions = ['a', 'a', 'a']
    accuracy = get_accuracy(testSet, predictions)
    print('Accuracy: {0}').format(accuracy)
    # Full pipeline on the Pima dataset.
    print('Pima Indians Diabetes Data Set Prediction')
    dataset = load_csv(FILENAME)
    training_set, test_set = split_dataset(dataset, SPLIT_RATIO)
    print('Split {0} rows into train={1} and test={2} rows').format(
        len(dataset),
        len(training_set),
        len(test_set)
    )
    # Prepare model.
    summaries = summarize_by_class(training_set)
    # # Test model.
    # predictions = get_predictions(summaries, test_set)
    # accuracy = get_accuracy(test_set, predictions)
    # print('Accuracy: {0}%').format(accuracy)
|
|
# -*- coding: utf-8 -*-
# try something like
from text_utils import cram
@caching
def index():
    """Schedule page: builds one HTML table per conference day from the
    accepted, scheduled activities.

    NOTE(review): indentation in this chunk was unreliable; the nested
    timetable() and the rendering code below are reconstructed as belonging
    to index() (the trailing 'return response.render(d)' cannot be at module
    level).  Confirm against the original controller file.
    """
    response.files.append(URL(r=request,c='static',f='css/prettyCheckboxes.css'))
    response.files.append(URL(r=request,c='static',f='js/prettyCheckboxes.js'))

    def timetable():
        """Collect rooms/levels/activities/slot maps for the schedule grid;
        the result tuple is cached via cache.ram below."""
        # Accepted, scheduled, non-poster activities only.
        q = db.activity.type!='poster'
        #q &= db.activity.type!='project'
        q &= db.activity.status=='accepted'
        q &= db.activity.scheduled_datetime!=None
        rows = db(q).select(db.activity.id,
                            db.activity.title,
                            db.activity.track,
                            db.activity.status,
                            db.activity.abstract,
                            db.activity.level,
                            db.activity.type,
                            db.activity.created_by,
                            db.activity.authors,
                            db.activity.categories,
                            db.activity.duration,
                            db.activity.confirmed,
                            db.activity.scheduled_datetime,
                            db.activity.scheduled_room,
                            )
        # One lozenge character per difficulty level.
        levels = {}
        for i, level in enumerate(ACTIVITY_LEVELS):
            levels[level] = XML("◊"* (i+1),)
        activities_per_date = {}
        slots_per_date = {}
        rooms_per_date = {}
        for activity in rows:
            date = activity.scheduled_datetime.date()
            time = activity.scheduled_datetime.time()
            room = int(activity.scheduled_room)
            activities_per_date.setdefault(date, []).append(activity)
            if not room in rooms_per_date.get(date, []):
                rooms_per_date.setdefault(date, []).append(room)
            if time not in slots_per_date.get(date, []):
                slots_per_date.setdefault(date, {}).setdefault(time, {})
            # find overlapped slots
            if activity.duration and activity.type not in ('open space', 'project', 'sprint', 'social', 'special'):
                # NOTE(review): integer division -- assumes Python 2 and
                # durations expressed in minutes.
                for i in range(activity.duration/60):
                    hidden_slot = activity.scheduled_datetime + datetime.timedelta(minutes=60*i)
                    hidden_slot_time = hidden_slot.time()
                    if hidden_slot_time not in slots_per_date.get(date, []):
                        slots_per_date[date].setdefault(hidden_slot_time, {})
            elif activity.duration:
                # record duration for special activities (social, meetings, sprints, etc.)
                slots_per_date[date][time] = activity.duration
        rooms = ACTIVITY_ROOMS.copy()
        activities = {None: ""}
        activities.update(dict([(row.id, row) for row in rows]))
        # (scheduled datetime, room) -> activity id
        schedule = dict([((row.scheduled_datetime,
                           row.scheduled_room),
                          row.id) for row in rows])
        hidden = {'speakers': {}, 'activities': {}}
        # Attendance counters: how many users marked "add me" per activity.
        q = db.partaker.add_me==True
        if 'votes' in request.vars:
            qv = (db.partaker.comment.contains("vote"))
            if request.vars['votes']=='no':
                qv = ~qv
            q &= qv
        rows = db(q).select(db.partaker.activity,
                            db.partaker.user_id.count().with_alias("partakers"),
                            groupby=db.partaker.activity,)
        partakers = dict([(row.partaker.activity, row.partakers) for row in rows])
        return rooms, levels, activities, schedule, activities_per_date, slots_per_date, rooms_per_date, hidden, partakers

    rooms, levels, activities, schedule, activities_per_date, slots_per_date, rooms_per_date, hidden, partakers = cache.ram(
        request.env.path_info + "timetable",
        lambda: timetable(),
        time_expire=0)
    schedule_tables = {}
    popovers = []
    # Activities the logged-in user subscribed to (drives the checkboxes).
    if auth.user_id:
        myactivities = db(db.partaker.user_id==auth.user_id).select()
    else:
        myactivities = []
    #myactivities = db(db.partaker.user_id==auth.user_id).select()
    for day in sorted(activities_per_date.keys()):
        table = []
        th = [TH("")]
        rooms_names = [name for room, name in sorted(rooms.items()) if room in rooms_per_date[day]]
        rooms_ids = dict([(name, room) for room, name in rooms.items()])
        for name in sorted(set(rooms_names), key=lambda x: rooms_ids[x]):
            th.append(TH(name, _colspan=rooms_names.count(name)))
        table.append(THEAD(TR(*th)))
        slots = sorted(slots_per_date[day])
        for slot_n, slot in enumerate(slots):
            slot_duration = slots_per_date.get(day, {}).get(slot)
            if not slot_duration:
                caption = "N/A"
            elif len(slots) < 4 and slot_duration > 60 and slot_n == len(slots)-1:
                # Long closing slot: show the full time range instead.
                slot_end = datetime.datetime.combine(day, slot) + datetime.timedelta(minutes=slot_duration)
                caption = T("%s to %s") % (slot.strftime("%H:%M"),
                                           slot_end.strftime("%H:%M"))
            else:
                caption = slot.strftime("%H:%M")
            tr = [TD(caption, _width="5%", _style="text-align: center;", )]
            common = None
            # NOTE(review): integer division (py2) -- per-room width in %.
            width = "%d%%" % (100/len(rooms_per_date[day])-5)
            for room in rooms:
                if not room in rooms_per_date[day]:
                    continue # hide unused rooms
                # find an activity for this slot
                slot_dt = datetime.datetime.combine(day, slot)
                selected = schedule.get((slot_dt, str(room)))
                if selected == False:
                    continue # spanned row!
                activity = selected and activities[int(selected)] \
                           or None
                if activity:
                    if activity.authors and \
                       len(activity.authors.strip()) > 1:
                        authors = A(cram(activity.authors, 25),
                                    _href=URL('authors', args=[activity.created_by]),
                                    _id="autors%s" % activity.id,
                                    rel="popover",
                                    **{'_data-content': "bio.....",
                                       '_data-original-title': T("Speaker"),
                                       '_data-trigger': 'hover',
                                       })
                    else:
                        authors = ''
                    activity_selected = False
                    select_activity = ""
                    label = "activity_selected_%s" % activity.id
                    if auth.user_id and myactivities is not None:
                        response.flash = ""
                        for act in myactivities:
                            if act["activity"] is not None:
                                if (act["activity"].id==activity.id) and (act["add_me"]):
                                    activity_selected = "on"
                        select_activity = INPUT(value=activity_selected,
                                                _type="checkbox",
                                                _id=label,
                                                _class="pretty_checkbox",
                                                _onclick="markActivity('activity_selected_%s', '%s');" % (activity.id, activity.id))
                    attendance = partakers.get(activity.id, "")
                    td = TD(select_activity,
                            LABEL(_for=label),
                            A(B(cram(activity.title, 50)),
                              _href=URL('content', args=[activity.id]),
                              _id="abstract%s" % activity.id,
                              rel="popover",
                              **{'_data-content': activity.abstract,
                                 '_data-original-title': T("Summary"),
                                 '_data-trigger': 'hover',
                                 }),
                            BR(),
                            authors and \
                            ACTIVITY_LEVEL_HINT[activity.level] \
                            or '',
                            authors and \
                            I(" %s " % \
                              (', '.join(activity.categories or \
                                         [])),
                              BR(), "", authors, "") or "",
                            IMG(_src=URL(c='static', f='img/warning.png'),
                                _title=T("our estimate of attendance reaches the room size, last remaining seats!"),
                                _style="float:right; border:0;")
                            if attendance>=ACTIVITY_ROOMS_EST_SIZES[room] and
                            auth.is_logged_in() and auth.has_membership("manager")
                            else "",
                            TAG.SUP(A(attendance, _href=URL(c='schedule', f='partakers', args=[activity.id])), _style="float:right;")
                            if auth.is_logged_in() and auth.has_membership("manager")
                            else "",
                            _width=width,
                            _style="text-align: center;",
                            _id=not activity.confirmed and "unconfirmed" or "confirmed",
                            _class="%s %s" % (activity.track,
                                              activity.type.replace(" ", "_")))
                    popovers.append("""$("#%s").popover();""" % ("autors%s" % activity.id))
                    popovers.append("""$("#%s").popover();""" % ("abstract%s" % activity.id))
                    if activity.type in ACTIVITY_COMMON:
                        # Plenary activity: one cell spanning every room.
                        tr = [tr[0],]
                        td.attributes["_colspan"] = len(rooms)
                        tr.append(td)
                        break
                    else:
                        if activity.duration:
                            slot_span = 0
                            for next_slot in slots[slots.index(slot)+1:]:
                                next_slot_dt = datetime.datetime.combine(slot_dt.date(), next_slot)
                                if next_slot_dt > (slot_dt + datetime.timedelta(minutes=activity.duration-1)):
                                    break
                                else:
                                    # mark the slot as spanned
                                    schedule[(next_slot_dt, str(room))] = False
                                    slot_span += 1
                            if slot_span:
                                td.attributes["_rowspan"] = slot_span + 1
                        tr.append(td)
                else:
                    tr.append(TD(_width=width))
            table.append(TR(*tr))
        schedule_tables[day] = TABLE(*table, _class="table-bordered table-hover table-condensed")
    d = dict(activities_per_date=activities_per_date,
             levels=levels, hidden=hidden['activities'].values()+hidden['speakers'].values(),
             popovers=popovers,
             schedule_tables=schedule_tables)
    return response.render(d)
@cache(request.env.path_info,time_expire=60*15,cache_model=cache.ram)
def content():
    """Render the activity summary for each modal box in the schedule."""
    # No activity id in the URL -> not found.
    if not request.args:
        raise HTTP(404)
    record = db.activity[request.args[0]]
    # Only accepted activities are public.
    if record.status != 'accepted':
        raise HTTP(403)
    return MARKMIN(record.abstract or '')
@cache(request.env.path_info,time_expire=60*15,cache_model=cache.ram)
def authors():
    "Render the speaker summary for each modal box in the schedule"
    if not request.args:
        raise HTTP(404)
    author = db.auth_user[request.args[0]]
    # Only users flagged as speakers have a public bio.
    if not author.speaker:
        raise HTTP(403)
    if author.photo:
        # NOTE(review): the backslash-continued _style literal embeds the
        # continuation lines' leading whitespace in the CSS string
        # (harmless to browsers, but the exact original spacing is unknown).
        img = IMG(_alt=author.last_name,
                  _src=URL(r=request, c='default',
                           f='fast_download', args=author.photo),
                  _width="100px", _height="100px",
                  _style="margin-left: 5px; margin-right: 5px; \
                  margin-top: 3px; margin-bottom: 3px; \
                  float: left;").xml()
    else:
        img = ""
    return img + MARKMIN(author.resume or '').xml()
@auth.requires_membership(role="manager")
def agenda():
    """Bulk-edit form: title/date/status/room for every non-poster,
    non-project activity (managers only)."""
    if 'order' in request.vars:
        # Map the requested column name to the field used for ordering.
        # NOTE(review): 'track' maps to db.activity.title -- looks like a
        # copy/paste slip (probably should be db.activity.track); confirm.
        order = {'title': db.activity.title,
                 'track': db.activity.title,
                 'status': db.activity.status,
                 'scheduled_datetime': db.activity.scheduled_datetime,
                 'scheduled_room': db.activity.scheduled_room,
                 }[request.vars['order']]
    else:
        order = (db.activity.type, db.activity.track, db.activity.title)
    response.view = 'generic.html'
    q = db.activity.type!='poster'
    q &= db.activity.type!='project'
    rows = db(q).select(db.activity.id,
                        db.activity.title,
                        db.activity.status,
                        db.activity.scheduled_datetime,
                        db.activity.scheduled_room,
                        orderby=order)
    rooms = ACTIVITY_ROOMS.copy()
    statuses = ['pending','accepted','rejected', 'declined']
    rooms[None] = ""
    fields = []
    for row in rows:
        # One row of inputs per activity: read-only title, datetime, status
        # select and room select -- each input name is keyed by activity id.
        fields.extend([
            INPUT(_name='activity.%s' % row.id, _value=row.title,
                  _readonly=True),
            INPUT(_name='date.%s' % row.id,
                  requires=IS_EMPTY_OR(IS_DATETIME()),
                  _value=row.scheduled_datetime),
            SELECT([OPTION(opt,
                           _value=opt,
                           _selected=row.status==opt) for \
                    opt in statuses],
                   _name='status.%s' % row.id),
            SELECT([OPTION(rooms[opt],
                           _value=opt,
                           _selected=row.scheduled_room==opt) for \
                    opt in sorted(rooms.keys())],
                   _name='room.%s' % row.id), BR()])
    fields.append(INPUT(_type="submit"))
    form = FORM(*fields)
    out = []
    if form.accepts(request.vars, session):
        response.flash = 'form accepted'
        for var in form.vars.keys() :
            # Field names look like '<kind>.<activity_id>'.
            activity_id = "." in var and int(var.split(".")[1]) or None
            val = form.vars[var]
            if var.startswith("date") and val and activity_id :
                db(db.activity.id==activity_id).update(scheduled_datetime=val)
                out.append("setting %s=%s" % (var, val))
            if var.startswith("status") and val and activity_id :
                db(db.activity.id==activity_id).update(status=val)
                out.append("setting %s=%s" % (var, val))
            if var.startswith("room") and val and activity_id :
                db(db.activity.id==activity_id).update(scheduled_room=val)
                out.append("setting %s=%s" % (var, val))
    elif form.errors:
        response.flash = 'form has errors'
    return dict(form=form, out=out)
@auth.requires_membership(role="manager")
def agenda2():
    """Assign a single activity to a room and datetime via a simple form."""
    response.view = 'generic.html'
    rows = db(db.activity.id >= 1).select(db.activity.id,
                                          db.activity.title,
                                          db.activity.scheduled_datetime,
                                          db.activity.scheduled_room,
                                          orderby=db.activity.title)

    def by_label(pair):
        # Sort (value, label) option pairs by their human-readable label.
        return pair[1]

    activities = sorted(((row.id, row.title) for row in rows), key=by_label)
    rooms = sorted([(0, "")] + list(ACTIVITY_ROOMS.items()), key=by_label)
    form = SQLFORM.factory(
        Field("activity_id", 'integer', requires=IS_IN_SET(activities)),
        Field("room", 'integer', requires=IS_IN_SET(rooms)),
        Field("datetime", 'datetime'),
        )
    # Guard-clause style: bail out early on validation failure.
    if not form.accepts(request.vars, session, keepvalues=True):
        if form.errors:
            response.flash = 'form has errors'
        return dict(form=form)
    response.flash = 'form accepted'
    db(db.activity.id == form.vars.activity_id).update(
        scheduled_datetime=form.vars.datetime,
        scheduled_room=form.vars.room)
    return dict(form=form)
@auth.requires_membership(role="manager")
def grid():
    """Render a day x slot x room grid of dropdowns for placing accepted
    talks into schedule slots, and persist any submitted assignments.

    Returns ``dict(form=..., out=[...])``; ``out`` lists the updates made.
    Managers only.
    """
    response.view = 'generic.html'
    # Only accepted, talk-like activities are schedulable here.
    q = db.activity.type!='poster'
    q &= db.activity.type!='project'
    q &= db.activity.status=='accepted'
    rows = db(q).select(db.activity.id,
                        db.activity.title,
                        db.activity.status,
                        db.activity.scheduled_datetime,
                        db.activity.scheduled_room,
                        )
    rooms = ACTIVITY_ROOMS.copy()
    slots = SLOTS
    days = DAYS
    fields = []
    # Dropdown options: activity id -> title, plus None -> "" for "empty".
    activities = {None: ""}
    activities.update(dict([(row.id, row.title) for row in rows]))
    # Current allocation: (datetime, room) -> activity id.
    schedule = dict([((row.scheduled_datetime, row.scheduled_room), row.id) for row in rows])
    ##fields.append(BEAUTIFY(schedule))
    table = []
    for day in days:
        # Header row: the day followed by one column per room name.
        th = [TH(day)] + [TH(room) for room in rooms.values()]
        table.append(TR(*th))
        for slot in slots:
            tr = [TH(slot)]
            for room in rooms:
                dt = datetime.datetime.strptime("%s %s" % (day, slot), "%Y-%m-%d %H:%M")
                # NOTE(review): the lookup key uses str(room) while the
                # schedule dict was keyed on row.scheduled_room exactly as
                # stored in the DB -- this assumes scheduled_room is stored
                # as a string; confirm against the model definition.
                selected = schedule.get((dt, str(room)))
                selected = selected and int(selected)
                tr.append(
                    TD(
                    SELECT([OPTION(v,
                                   _value=k,
                                   _selected=k and int(k)==selected) for \
                                   (k, v) in sorted(activities.items(),
                                                    key=lambda x: x[1].lower())],
                           _name='slot.%s.%s.%s' % (day, slot, room),
                           _style='width: 100px')
                    ))
            table.append(TR(*tr))
    fields.append(TABLE(*table))
    fields.append(INPUT(_type="submit"))
    form = FORM(*fields)
    out = []
    if form.accepts(request.vars, session, keepvalues=True):
        response.flash = 'form accepted'
        for var in form.vars.keys() :
            val = form.vars[var]
            if var.startswith('slot'):
                # Field names look like "slot.<day>.<time>.<room>".
                #slot.%s.%s
                activity_id = val
                slot, slot_day, slot_time, slot_room = var.split(".")
                dt = datetime.datetime.strptime("%s %s" % (slot_day, slot_time), "%Y-%m-%d %H:%M")
                # clean up slot if previously allocated
                q = db.activity.scheduled_datetime == dt
                q &= db.activity.scheduled_room == slot_room
                db(q).update(scheduled_datetime=None, scheduled_room=None)
                # allocate activity (if selected for this slot)
                if activity_id:
                    q = db.activity.id==activity_id
                    db(q).update(scheduled_datetime=dt,
                                 scheduled_room=slot_room)
                out.append("setting %s = d%s %s %s" % (activity_id, slot_day, slot_time, slot_room))
                pass
    elif form.errors:
        response.flash = 'form has errors'
    return dict(form=form, out=out)
@auth.requires_login()
def markactivity():
    """Toggle the current user's bookmark for one activity (AJAX endpoint).

    URL args: [1] = activity id, [3] = "checked" to add the bookmark,
    anything else to remove it.  Always answers HTTP 200.
    """
    add_me = request.args[3] == "checked"
    activity = int(request.args[1])
    comment = T("Marked in schedule on %s") % request.now
    query = ((db.partaker.user_id == auth.user_id) &
             (db.partaker.activity == activity))
    participation = db(query).select().first()
    if participation is None:
        # First time: create the participation record.
        db.partaker.insert(user_id=auth.user_id, activity=activity,
                           add_me=add_me, comment=comment)
    else:
        # Otherwise just flip the flag on the existing record.
        participation.update_record(add_me=add_me, comment=comment)
    raise HTTP(200, T("Done!"))
@auth.requires_login()
def ics():
    """Legacy alias kept for old links: redirect to the ``bookmarks``
    iCalendar export."""
    redirect(URL('bookmarks'))
def icalendar(user):
    """Export customized schedule as an iCalendar file.

    Builds an RFC 5545 VCALENDAR with one VEVENT per scheduled activity
    and returns an ``(ical_text, filename)`` tuple; the text is reduced
    to plain ASCII at the end.

    Args:
        user: auth_user row -- export only that user's bookmarked
            activities (partaker.add_me == True); any falsy value exports
            every accepted activity instead.

    NOTE(review): Python 2 only (``cStringIO``/``unicode``); the
    local->UTC conversion below relies on ``time.timezone`` and therefore
    ignores DST.
    """
    import datetime, time
    from cStringIO import StringIO
    ical = StringIO()
    # get user bookmarked activities, or all the activities if not logged in:
    if user:
        q = db.partaker.user_id==user.id
        q &= db.partaker.activity==db.activity.id
        q &= db.partaker.add_me==True
        filename = "pycon%s-%s-bookmark.ics" % (request.application, user.last_name)
        calname = "%s - %s" % (response.title, user.last_name)
    else:
        q = db.activity.status=='accepted'
        filename = "pycon%s-bookmark.ics" % (request.application)
        calname = "%s" % (response.title, )
    activities = db(q).select(
        db.activity.id,
        db.activity.title,
        db.activity.scheduled_datetime,
        db.activity.scheduled_room,
        db.activity.duration,
        db.activity.scheduled_room,
        db.activity.abstract,
        db.activity.authors,
        db.activity.type,
        )
    # Calendar header.
    ical.write('BEGIN:VCALENDAR')
    ical.write('\nVERSION:2.0')
    ical.write('\nX-WR-CALNAME:%s' % calname)
    ical.write('\nX-WR-TIMEZONE:%s' % time.tzname[0])
    ical.write('\nSUMMARY:%s' % response.title)
    ical.write('\nPRODID:-//PyCon %s Bookmarks//%s//EN' % (request.application,
                                           request.env.http_host,))
    ical.write('\nCALSCALE:GREGORIAN')
    ical.write('\nMETHOD:PUBLISH')
    # iCalendar UTC timestamp format.
    format = '%Y%m%dT%H%M%SZ'
    def ical_escape(text):
        # Escape the characters RFC 5545 treats specially in TEXT values.
        tokens = (("\\", "\\\\"), (";", r"\;"), (",", r"\,"), ("\n", "\\n"), ("\r", ""))
        text = text.decode("utf8", "replace")
        for (escape, replacement) in tokens:
            text = text.replace(escape, replacement)
        return text.encode("utf8", "replace")
    for item in activities:
        # Skip anything not fully scheduled (no start time or no duration).
        if not item.scheduled_datetime or not item.duration:
            continue
        url = '%s://%s%s' % (request.env.wsgi_url_scheme,
                             request.env.http_host,
                             URL(c='activity', f='accepted', args=item.id))
        # convert local times to UTC
        # (subtracting -time.timezone adds time.timezone seconds, i.e. the
        # offset west of UTC; DST is not accounted for)
        start = item.scheduled_datetime - datetime.timedelta(seconds=-time.timezone)
        ical.write('\nBEGIN:VEVENT')
        ical.write('\nUID:%s' % url)
        ical.write('\nURL:%s' % url)
        ical.write('\nDTSTART:%s' % start.strftime(format))
        ical.write('\nDTEND:%s' % (start+datetime.timedelta(minutes=item.duration)).strftime(format))
        ical.write('\nSUMMARY:%s (%s)' % (ical_escape(item.title), str(T(item.type))))
        authors = ical_escape(item.authors)
        abstract = ical_escape(item.abstract)
        desc = "%s\\n\\n%s" % (authors, abstract)
        ical.write('\nDESCRIPTION:')
        ical.write(desc)
        if item.scheduled_room:
            location = "%s, " % ACTIVITY_ROOMS.get(int(item.scheduled_room), "")
            location += ACTIVITY_ROOMS_ADDRESS.get(int(item.scheduled_room), "")
            ical.write('\nLOCATION:%s' % location)
        ical.write('\nEND:VEVENT')
    ical.write('\nEND:VCALENDAR')
    s = ical.getvalue()
    ical.close()
    # remove accents
    import unicodedata
    if not isinstance(s, unicode):
        s = unicode(s, "utf8", "ignore")
    nkfd_form = unicodedata.normalize('NFKD', s)
    only_ascii = nkfd_form.encode('ASCII', 'ignore')
    return only_ascii, filename
def bookmarks():
if auth.is_logged_in():
user_id = auth.user_id
user = auth.user
elif request.args:
user_id = request.args[0]
url_hash = request.args[1]
# check security hash is correct
user = db.auth_user[user_id]
if not user.security_hash == url_hash:
user_id = None
else:
user = None
try:
ical, filename = icalendar(user)
except Exception, e:
raise RuntimeError("%s" % e)
response.headers['Content-Type']='text/calendar'
response.headers["Content-Disposition"] \
= "attachment; filename=%s" % filename
return ical
@auth.requires_membership(role="manager")
def test_ical():
ret = []
rows = db(db.auth_user.id>0).select()
for u in rows:
try:
s, fn = icalendar(u)
ret.append("%s: ok %s %d "% (u.id, fn, len(s)))
except Exception, e:
ret.append("%s: err %s" % (u.id, e))
response.view = "generic.html"
return {"ret":ret}
@auth.requires_membership(role="manager")
def partakers():
    """List the e-mail addresses of everyone signed up for one activity.

    URL arg 0 selects the activity.  Managers only.
    """
    response.view = "generic.html"
    project = db.activity[request.args(0)]
    query = ((db.partaker.activity == project.id) &
             (db.partaker.add_me == True) &
             (db.partaker.user_id == db.auth_user.id))
    rows = db(query).select(db.auth_user.email)
    return dict(partakers=[row.email for row in rows])
|
|
import logging
import ldap
import re
class LDAPPublisher:
    """
    Publish a Zimbra addressbook to a LDAP server.

    Usage: ``LDAPPublisher(config, addressbook).run()``.

    ``config`` is a dict with keys "name", "ldap_url", "bind_uid",
    "bind_pw", "base_dn" and "drop" ("1" recreates the branch from
    scratch).  ``addressbook`` is a list of Zimbra contact dicts, each
    carrying a nested "_attrs" dict.
    """

    # Zimbra addressbook to publish (list of contact dicts).
    addressbook = None

    # Publisher configuration (see class docstring).
    config = None

    # Attribute map Zimbra <> LDAP.  Values prefixed with "_attrs/" are
    # looked up in the contact's nested "_attrs" dict; any other value is
    # looked up directly on the contact itself.
    attribute_map = {
        "cn": "fileAsStr",
        "sn": "_attrs/lastName",
        "givenname": "_attrs/firstName",
        "street": "_attrs/workStreet",
        "l": "_attrs/workCity",
        "st": "_attrs/workState",
        "postalCode": "_attrs/workPostalCode",
        "telephoneNumber": "_attrs/workPhone",
        "facsimileTelephoneNumber": "_attrs/workFax",
        "mobile": "_attrs/mobilePhone",
        "mail": "_attrs/email",
        "labeleduri": "_attrs/workURL",
        "o": "_attrs/company",
        "ou": "_attrs/department",
        "description": "_attrs/notes"
    }

    # LDAP connection used by the publisher (established in run()).
    ldap_connect = None

    # Mandatory LDAP attributes: entries missing any of these are skipped.
    mandatory_attributes = ["cn", "sn"]

    # Fallback attribute to copy from when an attribute is empty.
    # NOTE: the original literal was {"sn": "o", "sn": "cn"}; because of
    # the duplicate key only the last entry ever took effect, so the
    # effective (and here preserved) fallback for "sn" is "cn".
    attribute_alternatives = {
        "sn": "cn"
    }

    # Attribute used to identify the current entry in log messages.
    log_attribute = "cn"

    def __init__(self, config, addressbook):
        """ Initialize Publisher """
        self.addressbook = addressbook
        self.config = config
        logging.debug("Initialised Publisher %s" % (self.config["name"]))

    def drop_tree(self, dn):
        """ Recursively drop a LDAP tree (children first, then the root). """
        logging.debug("Deleting dn %s" % (dn))
        result = self.ldap_connect.search_s(
            dn,
            ldap.SCOPE_ONELEVEL  # @UndefinedVariable
        )
        if len(result) > 0:
            for leaf in result:
                self.drop_tree(leaf[0])
        self.ldap_connect.delete_s(dn)

    def _extract_attribute(self, address, source):
        """Return the contact's value for one attribute_map entry, or ""
        if the contact does not carry it."""
        matched_attribute = re.search("_attrs/(.*)", source)
        if matched_attribute is not None:
            # Nested Zimbra attribute.
            return address["_attrs"].get(matched_attribute.group(1), "")
        # Top-level attribute (e.g. "fileAsStr").
        return address.get(source, "")

    def run(self):
        """ Publish the addressbook """
        # Bind to ldap
        self.ldap_connect = ldap.initialize(self.config["ldap_url"])
        self.ldap_connect.simple_bind_s(
            self.config["bind_uid"],
            self.config["bind_pw"],
        )
        logging.debug("Connected to LDAP-Server %s as user %s" %
            (
                self.config["ldap_url"],
                self.config["bind_uid"]
            )
        )
        ldap_dn = "ou=%s,%s" % (self.config["name"], self.config["base_dn"])
        # Find our branch
        result = self.ldap_connect.search_s(
            self.config["base_dn"],
            ldap.SCOPE_SUBTREE,  # @UndefinedVariable
            "ou=%s" % (self.config["name"])
        )
        if len(result) > 0 and self.config["drop"] == "1":
            # Branch exists, but needs to be recreated
            logging.info("Dropping branch %s" % (ldap_dn))
            self.drop_tree(ldap_dn)
        if len(result) == 0 or self.config["drop"] == "1":
            # Branch doesn't exist or was just dropped. Recreate!
            add_data = [
                ("objectclass", ["top", "organizationalUnit"]),
                ("ou", [self.config["name"]])
            ]
            logging.info("Recreating tree %s" % (ldap_dn))
            self.ldap_connect.add_s(ldap_dn, add_data)
        uid = 0
        for address in self.addressbook:
            current_item = ""
            converted_addressbook = {}
            for attribute in self.attribute_map:
                # BUG FIX: the original first did an unguarded
                # address[...] lookup (KeyError on missing top-level keys)
                # and then unconditionally re-ran the top-level lookup,
                # clobbering every "_attrs/..." value with "".
                attribute_value = self._extract_attribute(
                    address, self.attribute_map[attribute])
                if attribute == self.log_attribute:
                    current_item = attribute_value
                # Keep plain ASCII as-is, everything else as UTF-8 bytes.
                try:
                    ldap_value = attribute_value.encode('ascii')
                except UnicodeEncodeError:
                    ldap_value = attribute_value.encode('utf-8')
                converted_addressbook[attribute] = ldap_value
            # Apply alternatives
            for attribute in self.attribute_alternatives:
                alternate_attribute = self.attribute_alternatives[attribute]
                if converted_addressbook[attribute] == "" and\
                    converted_addressbook[alternate_attribute] != "":
                    converted_addressbook[attribute] = \
                        converted_addressbook[alternate_attribute]
            # Skip entries that lack one of the mandatory attributes.
            sanity_checked = True
            for attribute in self.mandatory_attributes:
                if converted_addressbook[attribute] == "":
                    sanity_checked = False
            if sanity_checked:
                logging.info("Adding entry %s" % (current_item))
                add_data = [
                    ("objectClass", [
                        'top',
                        'person',
                        'organizationalperson',
                        'inetorgperson'
                    ])
                ]
                for entry in converted_addressbook:
                    if converted_addressbook[entry] != "":
                        add_data.append(
                            (
                                entry,
                                [converted_addressbook[entry]]
                            )
                        )
                dn = "uid=%d,%s" % (uid, ldap_dn)
                logging.debug(
                    "Adding entry at dn %s with the following data:\n %s" % (
                        dn,
                        add_data
                    )
                )
                self.ldap_connect.add_s(dn, add_data)
                uid = uid + 1
|
|
"""Test libzmq security (libzmq >= 3.3.0)"""
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import contextlib
import time
from threading import Thread
import zmq
from zmq.tests import (
BaseZMQTestCase, SkipTest, PYPY
)
from zmq.utils import z85
USER = b"admin"
PASS = b"password"
class TestSecurity(BaseZMQTestCase):
    """Exercise libzmq security mechanisms (NULL, PLAIN, CURVE) end to end,
    including a minimal in-process ZAP authentication handler."""

    def setUp(self):
        # Security mechanisms landed in libzmq 4.0 and need CURVE compiled
        # in; skip cleanly otherwise.
        if zmq.zmq_version_info() < (4,0):
            raise SkipTest("security is new in libzmq 4.0")
        try:
            zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("security requires libzmq to be built with CURVE support")
        super(TestSecurity, self).setUp()

    def zap_handler(self):
        """Minimal ZAP responder: accept CURVE and NULL requests and PLAIN
        requests matching USER/PASS; reject everything else."""
        socket = self.context.socket(zmq.REP)
        socket.bind("inproc://zeromq.zap.01")
        try:
            msg = self.recv_multipart(socket)
            version, sequence, domain, address, identity, mechanism = msg[:6]
            if mechanism == b'PLAIN':
                username, password = msg[6:]
            elif mechanism == b'CURVE':
                key = msg[6]
            self.assertEqual(version, b"1.0")
            self.assertEqual(identity, b"IDENT")
            reply = [version, sequence]
            if mechanism == b'CURVE' or \
                (mechanism == b'PLAIN' and username == USER and password == PASS) or \
                (mechanism == b'NULL'):
                reply.extend([
                    b"200",
                    b"OK",
                    b"anonymous",
                    # length-prefixed metadata: Hello=World
                    b"\5Hello\0\0\0\5World",
                ])
            else:
                reply.extend([
                    b"400",
                    b"Invalid username or password",
                    b"",
                    b"",
                ])
            socket.send_multipart(reply)
        finally:
            socket.close()

    @contextlib.contextmanager
    def zap(self):
        """Run the ZAP handler thread for the duration of the with-block."""
        self.start_zap()
        time.sleep(0.5) # allow time for the Thread to start
        try:
            yield
        finally:
            self.stop_zap()

    def start_zap(self):
        self.zap_thread = Thread(target=self.zap_handler)
        self.zap_thread.start()

    def stop_zap(self):
        self.zap_thread.join()

    def bounce(self, server, client, test_metadata=True):
        """Round-trip random frames client->server->client, optionally
        checking the ZAP-supplied frame metadata on the server side."""
        msg = [os.urandom(64), os.urandom(64)]
        client.send_multipart(msg)
        frames = self.recv_multipart(server, copy=False)
        recvd = list(map(lambda x: x.bytes, frames))
        try:
            if test_metadata and not PYPY:
                for frame in frames:
                    self.assertEqual(frame.get('User-Id'), 'anonymous')
                    self.assertEqual(frame.get('Hello'), 'World')
                    self.assertEqual(frame['Socket-Type'], 'DEALER')
        except zmq.ZMQVersionError:
            # Frame metadata requires a newer libzmq; skip the checks.
            pass
        self.assertEqual(recvd, msg)
        server.send_multipart(recvd)
        msg2 = self.recv_multipart(client)
        self.assertEqual(msg2, msg)

    def test_null(self):
        """test NULL (default) security"""
        server = self.socket(zmq.DEALER)
        client = self.socket(zmq.DEALER)
        self.assertEqual(client.MECHANISM, zmq.NULL)
        self.assertEqual(server.mechanism, zmq.NULL)
        self.assertEqual(client.plain_server, 0)
        self.assertEqual(server.plain_server, 0)
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect("%s:%i" % (iface, port))
        # No ZAP handler needed for NULL; no metadata to check either.
        self.bounce(server, client, False)

    def test_plain(self):
        """test PLAIN authentication"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.assertEqual(client.plain_username, b'')
        self.assertEqual(client.plain_password, b'')
        client.plain_username = USER
        client.plain_password = PASS
        self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER)
        self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS)
        self.assertEqual(client.plain_server, 0)
        self.assertEqual(server.plain_server, 0)
        server.plain_server = True
        self.assertEqual(server.mechanism, zmq.PLAIN)
        self.assertEqual(client.mechanism, zmq.PLAIN)
        assert not client.plain_server
        assert server.plain_server
        with self.zap():
            iface = 'tcp://127.0.0.1'
            port = server.bind_to_random_port(iface)
            client.connect("%s:%i" % (iface, port))
            self.bounce(server, client)

    def skip_plain_inauth(self):
        """test PLAIN failed authentication"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.sockets.extend([server, client])
        client.plain_username = USER
        client.plain_password = b'incorrect'
        server.plain_server = True
        self.assertEqual(server.mechanism, zmq.PLAIN)
        self.assertEqual(client.mechanism, zmq.PLAIN)
        with self.zap():
            iface = 'tcp://127.0.0.1'
            port = server.bind_to_random_port(iface)
            client.connect("%s:%i" % (iface, port))
            client.send(b'ping')
            server.rcvtimeo = 250
            # Rejected by ZAP: nothing must arrive at the server.
            self.assertRaisesErrno(zmq.EAGAIN, server.recv)

    def test_keypair(self):
        """test curve_keypair"""
        try:
            public, secret = zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("CURVE unsupported")
        self.assertEqual(type(secret), bytes)
        self.assertEqual(type(public), bytes)
        self.assertEqual(len(secret), 40)
        self.assertEqual(len(public), 40)
        # verify that it is indeed Z85 (40 ASCII chars decode to 32 bytes).
        # Fix: the original unpacked these in swapped order (decoding
        # ``public`` into ``bsecret`` and vice versa); harmless since both
        # are checked identically, but misleading.
        bpublic, bsecret = [ z85.decode(key) for key in (public, secret) ]
        self.assertEqual(type(bsecret), bytes)
        self.assertEqual(type(bpublic), bytes)
        self.assertEqual(len(bsecret), 32)
        self.assertEqual(len(bpublic), 32)

    def test_curve(self):
        """test CURVE encryption"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.sockets.extend([server, client])
        try:
            server.curve_server = True
        except zmq.ZMQError as e:
            # will raise EINVAL if no CURVE support
            if e.errno == zmq.EINVAL:
                raise SkipTest("CURVE unsupported")
        server_public, server_secret = zmq.curve_keypair()
        client_public, client_secret = zmq.curve_keypair()
        server.curve_secretkey = server_secret
        server.curve_publickey = server_public
        client.curve_serverkey = server_public
        client.curve_publickey = client_public
        client.curve_secretkey = client_secret
        self.assertEqual(server.mechanism, zmq.CURVE)
        self.assertEqual(client.mechanism, zmq.CURVE)
        self.assertEqual(server.get(zmq.CURVE_SERVER), True)
        self.assertEqual(client.get(zmq.CURVE_SERVER), False)
        with self.zap():
            iface = 'tcp://127.0.0.1'
            port = server.bind_to_random_port(iface)
            client.connect("%s:%i" % (iface, port))
            self.bounce(server, client)
|
|
from __future__ import division, print_function, absolute_import
from ...utils.six.moves import xrange
import numpy as np
import nose
from dipy.io.bvectxt import orientation_from_string
from dipy.tracking.utils import (affine_for_trackvis, connectivity_matrix,
density_map, length, move_streamlines,
ndbincount, reduce_labels,
reorder_voxels_affine, seeds_from_mask,
target, _rmi)
import dipy.tracking.metrics as metrix
from dipy.tracking.vox2track import streamline_mapping
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_raises, assert_true
def make_streamlines():
    """Return two hand-crafted test streamlines, each a (4, 3) float array."""
    first = np.array([[0, 0, 0],
                      [1, 1, 1],
                      [2, 2, 2],
                      [5, 10, 12]], 'float')
    second = np.array([[1, 2, 3],
                       [3, 2, 0],
                       [5, 20, 33],
                       [40, 80, 120]], 'float')
    return [first, second]
def test_density_map():
    """density_map should count, per voxel, how many streamlines visit it
    (each streamline at most once per voxel), honoring voxel_size/affine."""
    #One streamline diagonal in volume
    streamlines = [np.array([np.arange(10)]*3).T]
    shape = (10, 10, 10)
    x = np.arange(10)
    expected = np.zeros(shape)
    expected[x, x, x] = 1.
    dm = density_map(streamlines, vol_dims=shape, voxel_size=(1, 1, 1))
    assert_array_equal(dm, expected)
    #add streamline, make voxel_size smaller. Each streamline should only be
    #counted once, even if multiple points lie in a voxel
    streamlines.append(np.ones((5, 3)))
    shape = (5, 5, 5)
    x = np.arange(5)
    expected = np.zeros(shape)
    expected[x, x, x] = 1.
    expected[0, 0, 0] += 1
    dm = density_map(streamlines, vol_dims=shape, voxel_size=(2, 2, 2))
    assert_array_equal(dm, expected)
    #should work with a generator
    dm = density_map(iter(streamlines), vol_dims=shape, voxel_size=(2, 2, 2))
    assert_array_equal(dm, expected)
    # Test passing affine
    # (2mm isotropic voxels with a half-voxel offset -- same mapping as the
    # voxel_size=(2, 2, 2) call above, so the expectation is unchanged.)
    affine = np.diag([2, 2, 2, 1.])
    affine[:3, 3] = 1.
    dm = density_map(streamlines, shape, affine=affine)
    assert_array_equal(dm, expected)
    # Shift the image by 2 voxels, ie 4mm
    affine[:3, 3] -= 4.
    expected_old = expected
    new_shape = [i + 2 for i in shape]
    expected = np.zeros(new_shape)
    expected[2:, 2:, 2:] = expected_old
    dm = density_map(streamlines, new_shape, affine=affine)
    assert_array_equal(dm, expected)
def test_connectivity_matrix():
    """connectivity_matrix should count streamlines per (start, end) label
    pair, with optional symmetry and per-pair streamline mappings."""
    label_volume = np.array([[[3, 0, 0],
                              [0, 0, 0],
                              [0, 0, 4]]])
    # Two streamlines run label 3 -> 4, one runs 4 -> 3.
    streamlines = [np.array([[0,0,0],[0,0,0],[0,2,2]], 'float'),
                   np.array([[0,0,0],[0,1,1],[0,2,2]], 'float'),
                   np.array([[0,2,2],[0,1,1],[0,0,0]], 'float')]
    expected = np.zeros((5, 5), 'int')
    expected[3, 4] = 2
    expected[4, 3] = 1
    # Check basic Case
    matrix = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                 symmetric=False)
    assert_array_equal(matrix, expected)
    # Test mapping: (start, end) -> list of streamline indices.
    matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                          symmetric=False, return_mapping=True)
    assert_array_equal(matrix, expected)
    assert_equal(mapping[3, 4], [0, 1])
    assert_equal(mapping[4, 3], [2])
    assert_equal(mapping.get((0, 0)), None)
    # Test mapping and symmetric
    matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                          symmetric=True, return_mapping=True)
    assert_equal(mapping[3, 4], [0, 1, 2])
    # When symmetric only (3,4) is a key, not (4, 3)
    assert_equal(mapping.get((4, 3)), None)
    # expected output matrix is symmetric version of expected
    expected = expected + expected.T
    assert_array_equal(matrix, expected)
    # Test mapping_as_streamlines, mapping dict has lists of streamlines
    matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                          symmetric=False,
                                          return_mapping=True,
                                          mapping_as_streamlines=True)
    assert_true(mapping[3, 4][0] is streamlines[0])
    assert_true(mapping[3, 4][1] is streamlines[1])
    assert_true(mapping[4, 3][0] is streamlines[2])
    # Test passing affine to connectivity_matrix
    expected = matrix
    affine = np.diag([-1, -1, -1, 1.])
    streamlines = [-i for i in streamlines]
    matrix = connectivity_matrix(streamlines, label_volume, affine=affine)
    # In the symmetrical case, the matrix should be, well, symmetric:
    # Fix: the original compared matrix[4, 3] with itself, which is
    # vacuously true; symmetry means matrix[4, 3] == matrix[3, 4].
    assert_equal(matrix[4, 3], matrix[3, 4])
def test_ndbincount():
    """ndbincount should count (and optionally weight) occurrences of the
    column vectors of x, with an optional explicit output shape."""
    def check(expected):
        # Spot-check the four bins that receive any mass.
        assert_equal(bc[0, 0], expected[0])
        assert_equal(bc[0, 1], expected[1])
        assert_equal(bc[1, 0], expected[2])
        assert_equal(bc[2, 2], expected[3])
    # Six index pairs: (0,0) x2, (0,1) x2, (1,0), (2,2).
    x = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [1, 0], [2, 2]]).T
    counts = [2, 2, 1, 1]
    # Plain occurrence counting.
    bc = ndbincount(x)
    assert_equal(bc.shape, (3, 3))
    check(counts)
    # Explicit output shape.
    bc = ndbincount(x, shape=(4, 5))
    assert_equal(bc.shape, (4, 5))
    check(counts)
    # Weighted counting: each occurrence contributes its weight.
    weights = np.arange(6.)
    weights[-1] = 1.23
    bc = ndbincount(x, weights=weights)
    check([1., 5., 4., 1.23])
    # A shape too small for the data raises.
    assert_raises(ValueError, ndbincount, x, None, (2, 2))
def test_reduce_labels():
    """reduce_labels should remap labels to 0..n-1 and return the lookup."""
    shape = (4, 5, 6)
    n_voxels = np.prod(shape)
    # Original labels run from 100 to 100 + n_voxels - 1 (i.e. 100..219).
    labels = np.arange(n_voxels).reshape(shape) + 100
    new_labels, lookup = reduce_labels(labels)
    # New labels are shifted down to start at zero...
    assert_array_equal(new_labels, labels - 100)
    # ...and the lookup table maps each new label back to the old one.
    assert_array_equal(lookup, labels.ravel())
def test_move_streamlines():
    """move_streamlines should apply an affine to every streamline and be
    invertible via input_space."""
    streamlines = make_streamlines()
    affine = np.eye(4)
    # Identity affine: output equals input.
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i])
    # Pure translation.
    affine[:3,3] += (4,5,6)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i]+(4, 5, 6))
    # Axis permutation (rows of the identity reordered).
    affine = np.eye(4)
    affine = affine[[2,1,0,3]]
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i][:, [2, 1, 0]])
    # Round trip: mapping into affine space and back with input_space
    # recovers the original coordinates.
    affine[:3,3] += (4,5,6)
    new_streamlines = move_streamlines(streamlines, affine)
    undo_affine = move_streamlines(new_streamlines, np.eye(4),
                                   input_space=affine)
    for i, test_sl in enumerate(undo_affine):
        assert_array_almost_equal(test_sl, streamlines[i])
    # Mutating the affine after the call must NOT change the generator's
    # output, i.e. move_streamlines keeps its own copy of the affine.
    # (The original comment claimed the opposite; the equality assertion
    # below only passes if the mutation has no effect.)
    affineA = affine.copy()
    affineB = affine.copy()
    streamlinesA = move_streamlines(streamlines, affineA)
    streamlinesB = move_streamlines(streamlines, affineB)
    affineB[:] = 0
    for (a, b) in zip(streamlinesA, streamlinesB):
        assert_array_equal(a, b)
def test_target():
    """target() should lazily filter streamlines by whether they pass
    through a mask (include=True keeps hits, include=False keeps misses),
    preserving object identity, and raise for points outside the volume."""
    streamlines = [np.array([[0., 0., 0.],
                             [1., 0., 0.],
                             [2., 0., 0.]]),
                   np.array([[0., 0., 0],
                             [0, 1., 1.],
                             [0, 2., 2.]])
                  ]
    affine = np.eye(4)
    mask = np.zeros((4, 4, 4), dtype=bool)
    mask[0, 0, 0] = True
    # Both pass though
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 2)
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 0)
    # only first
    mask[:] = False
    mask[1, 0, 0] = True
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    # Filtering keeps the original array objects, not copies.
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])
    # Test that bad points raise a value error
    # (target is a generator, so the error surfaces on iteration).
    bad_sl = [ np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)
    bad_sl = [-np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)
    # Test smaller voxels
    # (random invertible-ish affine; streamlines are moved into its space
    # first, so the same inclusion pattern as above must hold).
    affine = np.random.random((4, 4)) - .5
    affine[3] = [0, 0, 0, 1]
    streamlines = list(move_streamlines(streamlines, affine))
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])
    # Test that changing mask and affine do not break target
    # (i.e. target snapshots both before the generator is consumed).
    include = target(streamlines, mask, affine=affine)
    exclude = target(streamlines, mask, affine=affine, include=False)
    affine[:] = np.eye(4)
    mask[:] = False
    include = list(include)
    exclude = list(exclude)
    assert_equal(len(include), 1)
    assert_true(include[0] is streamlines[0])
    assert_equal(len(exclude), 1)
    assert_true(exclude[0] is streamlines[1])
def test_voxel_ornt():
    """reorder_voxels_affine round-trips and produces the expected point
    transforms for several orientation pairs (ras/sra/lpi/srp)."""
    sh = (40, 40, 40)
    sz = (1, 2, 3)
    I4 = np.eye(4)
    ras = orientation_from_string('ras')
    sra = orientation_from_string('sra')
    lpi = orientation_from_string('lpi')
    srp = orientation_from_string('srp')
    # Same-orientation mappings must all be the identity.
    affine = reorder_voxels_affine(ras, ras, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(sra, sra, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(lpi, lpi, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(srp, srp, sh, sz)
    assert_array_equal(affine, I4)
    streamlines = make_streamlines()
    box = np.array(sh)*sz
    # ras -> sra is a pure axis permutation; its inverse must undo it.
    sra_affine = reorder_voxels_affine(ras, sra, sh, sz)
    toras_affine = reorder_voxels_affine(sra, ras, sh, sz)
    assert_array_equal(np.dot(toras_affine, sra_affine), I4)
    expected_sl = (sl[:, [2, 0, 1]] for sl in streamlines)
    test_sl = move_streamlines(streamlines, sra_affine)
    for ii in xrange(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))
    # ras -> lpi flips every axis, i.e. reflects points about the box.
    lpi_affine = reorder_voxels_affine(ras, lpi, sh, sz)
    toras_affine = reorder_voxels_affine(lpi, ras, sh, sz)
    assert_array_equal(np.dot(toras_affine, lpi_affine), I4)
    expected_sl = (box - sl for sl in streamlines)
    test_sl = move_streamlines(streamlines, lpi_affine)
    for ii in xrange(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))
    # ras -> srp permutes axes and flips the second one.
    srp_affine = reorder_voxels_affine(ras, srp, sh, sz)
    # NOTE(review): the hard-coded (40, 40, 40) / (3, 1, 2) below look like
    # sh and sz permuted into srp order -- confirm this is intentional.
    toras_affine = reorder_voxels_affine(srp, ras, (40 ,40, 40), (3, 1, 2))
    assert_array_equal(np.dot(toras_affine, srp_affine), I4)
    expected_sl = [sl.copy() for sl in streamlines]
    for sl in expected_sl:
        sl[:, 1] = box[1] - sl[:, 1]
    expected_sl = (sl[:, [2, 0, 1]] for sl in expected_sl)
    test_sl = move_streamlines(streamlines, srp_affine)
    for ii in xrange(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))
def test_streamline_mapping():
    """streamline_mapping should map each visited voxel to the indices (or
    the streamline objects themselves) of the streamlines crossing it."""
    streamlines = [np.array([[0, 0, 0], [0, 0, 0], [0, 2, 2]], 'float'),
                   np.array([[0, 0, 0], [0, 1, 1], [0, 2, 2]], 'float'),
                   np.array([[0, 2, 2], [0, 1, 1], [0, 0, 0]], 'float')]
    mapping = streamline_mapping(streamlines, (1, 1, 1))
    expected = {(0, 0, 0):[0, 1, 2], (0, 2, 2):[0, 1, 2], (0, 1, 1):[1, 2]}
    assert_equal(mapping, expected)
    # With mapping_as_streamlines=True the values are the arrays, not indices.
    mapping = streamline_mapping(streamlines, (1, 1, 1),
                                 mapping_as_streamlines=True)
    expected = dict((k, [streamlines[i] for i in indices])
                    for k, indices in expected.items())
    assert_equal(mapping, expected)
    # Test passing affine
    # (half-voxel shift keeps the points in the same voxels).
    affine = np.eye(4)
    affine[:3, 3] = .5
    mapping = streamline_mapping(streamlines, affine=affine,
                                 mapping_as_streamlines=True)
    assert_equal(mapping, expected)
    # Make the voxel size smaller
    # (0.5mm voxels double every voxel index in the expected keys).
    affine = np.diag([.5, .5, .5, 1.])
    affine[:3, 3] = .25
    expected = dict((tuple(i*2 for i in key), value)
                    for key, value in expected.items())
    mapping = streamline_mapping(streamlines, affine=affine,
                                 mapping_as_streamlines=True)
    assert_equal(mapping, expected)
def test_rmi():
    """_rmi should behave like numpy.ravel_multi_index, including for
    small integer dtypes where naive arithmetic would overflow."""
    I1 = _rmi([3, 4], [10, 10])
    assert_equal(I1, 34)
    I1 = _rmi([0, 0], [10, 10])
    assert_equal(I1, 0)
    # Out-of-range index must raise, like ravel_multi_index.
    assert_raises(ValueError, _rmi, [10, 0], [10, 10])
    try:
        from numpy import ravel_multi_index
    except ImportError:
        raise nose.SkipTest()
    # Dtype of random integers is system dependent
    A, B, C, D = np.random.randint(0, 1000, size=[4, 100])
    I1 = _rmi([A, B], dims=[1000, 1000])
    I2 = ravel_multi_index([A, B], dims=[1000, 1000])
    assert_array_equal(I1, I2)
    I1 = _rmi([A, B, C, D], dims=[1000]*4)
    I2 = ravel_multi_index([A, B, C, D], dims=[1000]*4)
    assert_array_equal(I1, I2)
    # Check for overflow with small int types
    indices = np.random.randint(0, 255, size=(2, 100))
    dims = (1000, 1000)
    I1 = _rmi(indices, dims=dims)
    I2 = ravel_multi_index(indices, dims=dims)
    assert_array_equal(I1, I2)
def test_affine_for_trackvis():
    """The trackvis affine should map the voxel origin to half a voxel,
    i.e. to the center of the first voxel."""
    voxel_size = np.array([1., 2, 3.])
    affine = affine_for_trackvis(voxel_size)
    mapped_origin = np.dot(affine, [0, 0, 0, 1])
    assert_array_almost_equal(mapped_origin[:3], voxel_size / 2)
def test_length():
    """length() must agree with metrix.length on every streamline."""
    n_streamlines = 50
    n_pts = 100
    t = np.linspace(-10, 10, n_pts)
    # Simulated bundle: cosine curves offset along y, truncated at random
    # start/end points and scaled by 10.
    bundle = [np.vstack((np.cos(2 * t / np.pi),
                         np.zeros(t.shape) + offset,
                         t)).T
              for offset in np.linspace(3, 5, n_streamlines)]
    start = np.random.randint(10, 30, n_streamlines)
    end = np.random.randint(60, 100, n_streamlines)
    bundle = [10 * sl[start[i]:end[i]] for i, sl in enumerate(bundle)]
    for idx, this_length in enumerate(length(bundle)):
        assert_equal(this_length, metrix.length(bundle[idx]))
def test_seeds_from_mask():
    """seeds_from_mask should place `density` seeds inside each nonzero
    mask voxel, all within that voxel's bounds."""
    # Fix: np.random.random_integers(0, 1, ...) was deprecated (and later
    # removed from NumPy); randint with an exclusive upper bound draws the
    # same {0, 1} values.
    mask = np.random.randint(0, 2, size=(10, 10, 10))
    seeds = seeds_from_mask(mask, density=1)
    # density=1: one seed per nonzero voxel, at the voxel coordinate.
    assert_equal(mask.sum(), len(seeds))
    assert_array_equal(np.argwhere(mask), seeds)
    mask[:] = False
    mask[3, 3, 3] = True
    seeds = seeds_from_mask(mask, density=[3, 4, 5])
    assert_equal(len(seeds), 3 * 4 * 5)
    # All seeds stay inside voxel (3, 3, 3).
    assert_true(np.all((seeds > 2.5) & (seeds < 3.5)))
    mask[4, 4, 4] = True
    seeds = seeds_from_mask(mask, density=[3, 4, 5])
    assert_equal(len(seeds), 2 * 3 * 4 * 5)
    assert_true(np.all((seeds > 2.5) & (seeds < 4.5)))
    # Seeds split evenly between the two active voxels.
    in_333 = ((seeds > 2.5) & (seeds < 3.5)).all(1)
    assert_equal(in_333.sum(), 3 * 4 * 5)
    in_444 = ((seeds > 3.5) & (seeds < 4.5)).all(1)
    assert_equal(in_444.sum(), 3 * 4 * 5)
def test_connectivity_matrix_shape():
    """The matrix must span every label, even labels no streamline hits."""
    # Label the three z-planes 0, 1 and 2.
    labels = np.zeros((3, 3, 3), dtype=int)
    labels[..., 1] = 1
    labels[..., 2] = 2
    # Two streamlines that only travel between the first two z-planes, so
    # label 2 never appears in any streamline.
    streamlines = [
        np.array([[0., 0., 0.], [0., 0., 0.5], [0., 0., 1.]]),
        np.array([[0., 1., 1.], [0., 1., 0.5], [0., 1., 0.]]),
    ]
    matrix = connectivity_matrix(streamlines, labels, affine=np.eye(4))
    assert_equal(matrix.shape, (3, 3))
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Shared utilities for building edges for graphs.
Helper functions here operate either on generic automaton graphs or specifically
on python ASTs.
"""
import collections
from typing import Dict, List, Optional, Set, Tuple
import gast
from gfsa import graph_types
JUMPS_OUT_OF_EDGE_TYPE = "EXTRA_JUMPS_OUT_OF"
PROGRAM_GRAPH_EDGE_TYPES = {
"PG_CFG_NEXT",
"PG_LAST_READ",
"PG_LAST_WRITE",
"PG_COMPUTED_FROM",
"PG_RETURNS_TO",
"PG_FORMAL_ARG_NAME",
"PG_NEXT_SYNTAX",
"PG_LAST_LEXICAL_USE",
"PG_CALLS",
}
SAME_IDENTIFIER_EDGE_TYPE = "EXTRA_SAME_IDENTIFIER"
def compute_jumps_out_edges(
    tree,
    ast_to_node_id,
    from_return=True,
    from_retval=True,
    from_break_cont=True
):
  """Compute EXTRA_JUMPS_OUT_OF edges.

  There are two types of edges with this type:
  - All break and continue statements should be connected to the enclosing loop.
  - All return statements and return value expressions should be connected to
    the function body.

  This task is quite simple, but requires the automaton to learn non-trivial
  behavior: it must remember whether it came from a break/continue or a return,
  and it must use context to figure out whether a given expression is a return
  value or some other type of expression.

  Args:
    tree: The AST to construct targets for.
    ast_to_node_id: Dictionary that maps AST node ids to their graph node id.
    from_return: Whether to include edges from return statements to their
      containing function.
    from_retval: Whether to include edges from return VALUES to their containing
      function.
    from_break_cont: Whether to include edges from break/continue statements to
      their containing loop.

  Returns:
    List of "EXTRA_JUMPS_OUT_OF" edges.
  """
  result = []

  # pytype: disable=attribute-error
  def _go(subtree, parent_func,
          parent_loop):
    """Recursively process a subtree.

    The high level strategy is to recursively walk down the tree. When we see
    a function or loop node, we update the `parent_func` or `parent_loop`
    argument, and then continue descending. When we reach a `break`, `continue`,
    or `return`, we then connect these nodes to the corresponding innermost
    function or loop.

    Args:
      subtree: Current subtree to process.
      parent_func: The AST node corresponding to the (innermost) FunctionDef
        that contains this subtree.
      parent_loop: The AST node corresponding to the (innermost) For or While
        loop that contains this subtree.
    """
    if isinstance(subtree, gast.Return):
      assert parent_func, "return outside function"
      if from_return:
        result.append((ast_to_node_id[id(subtree)],
                       ast_to_node_id[id(parent_func)], JUMPS_OUT_OF_EDGE_TYPE))
      # A bare `return` has value None, so a retval edge is only emitted when
      # a value expression actually exists.
      if from_retval and subtree.value:
        result.append((ast_to_node_id[id(subtree.value)],
                       ast_to_node_id[id(parent_func)], JUMPS_OUT_OF_EDGE_TYPE))
    elif isinstance(subtree, (gast.Break, gast.Continue)):
      assert parent_loop, "break or continue outside loop"
      if from_break_cont:
        result.append((ast_to_node_id[id(subtree)],
                       ast_to_node_id[id(parent_loop)], JUMPS_OUT_OF_EDGE_TYPE))
    elif isinstance(subtree, gast.FunctionDef):
      # Update current function. Loops outside this function are not valid
      # break/continue targets inside it, so parent_loop resets to None.
      for stmt in subtree.body:
        _go(stmt, subtree, None)
    elif isinstance(subtree, (gast.For, gast.While)):
      # Update current loop.
      # NOTE(review): only the loop `body` is traversed here (and only `body`
      # for FunctionDef above) — loop `orelse` blocks, decorators, and argument
      # defaults are skipped. Confirm this is intentional.
      for stmt in subtree.body:
        _go(stmt, parent_func, subtree)
    else:
      for child in gast.iter_child_nodes(subtree):
        _go(child, parent_func, parent_loop)
  # pytype: enable=attribute-error

  _go(tree, None, None)
  return result
def schema_edge_types(schema,
                      with_node_types = False):
  """Collect the set of schema edge type names for an automaton schema.

  Args:
    schema: Automaton graph schema to use.
    with_node_types: Whether to include the node type of the source node.

  Returns:
    Set of schema edge type names.
  """
  if with_node_types:
    return {
        f"SCHEMA_{edge}_FROM_{node_type}"
        for node_type, node_schema in schema.items()
        for edge in node_schema.out_edges
    }
  return {
      f"SCHEMA_{edge}"
      for node_schema in schema.values()
      for edge in node_schema.out_edges
  }
def compute_schema_edges(
    graph,
    with_node_types = False
):
  """Compute SCHEMA_* edges from the encoded graph.

  We extract the outgoing edges that the automaton sees, but remove the
  sentinel "missing" edges. Incoming edges are redundant and less informative
  so they are not included.

  Args:
    graph: Automaton graph to use.
    with_node_types: Whether to include the node type of the source node.

  Returns:
    List of schema edges.
  """
  edges = []
  for source_id, node_info in graph.items():
    for out_type, destinations in node_info.out_edges.items():
      # The edge type only depends on the out-edge type (and optionally the
      # source node type), so compute it once per out-edge group.
      if with_node_types:
        edge_type = f"SCHEMA_{out_type}_FROM_{node_info.node_type}"
      else:
        edge_type = f"SCHEMA_{out_type}"
      for tagged_dest in destinations:
        if not tagged_dest.in_edge.endswith("_missing"):
          edges.append((source_id, tagged_dest.node_id, edge_type))
  return edges
def compute_same_identifier_edges(
    tree, ast_to_node_id
):
  """Compute EXTRA_SAME_IDENTIFIER edges from an AST.

  These edges connect any two `Name` nodes with the same identifier, including
  a self-loop edge from every `Name` node to itself.

  Args:
    tree: The AST to construct an example for.
    ast_to_node_id: Dictionary that maps AST node ids to their graph node id.

  Returns:
    List of same-identifier edges.
  """
  result = []
  # Graph node ids of previously-seen Name nodes, keyed by identifier string.
  nodes_by_identifier = collections.defaultdict(list)
  for ast_node in gast.walk(tree):
    if isinstance(ast_node, gast.Name):
      graph_node_id = ast_to_node_id[id(ast_node)]
      identifier = ast_node.id  # pytype: disable=attribute-error
      # Connect this node in both directions to every earlier node that
      # shares its identifier.
      for matching in nodes_by_identifier[identifier]:
        result.append((graph_node_id, matching, SAME_IDENTIFIER_EDGE_TYPE))
        result.append((matching, graph_node_id, SAME_IDENTIFIER_EDGE_TYPE))
      nodes_by_identifier[identifier].append(graph_node_id)
      # Every Name node also gets a self-loop edge.
      result.append((graph_node_id, graph_node_id, SAME_IDENTIFIER_EDGE_TYPE))
  return result
def nth_child_edge_types(max_child_count):
  """Constructs the edge types for nth-child edges.

  Args:
    max_child_count: Maximum number of children that get explicit nth-child
      edges.

  Returns:
    Set of edge type names.
  """
  edge_names = set()
  for index in range(max_child_count):
    edge_names.add(f"CHILD_INDEX_{index}")
  return edge_names
def compute_nth_child_edges(
    graph, max_child_count
):
  """Computes CHILD_INDEX_* edges from a graph.

  We assume that the graph was generated by `generic_ast_graphs.ast_to_graph`
  and thus that sequence edges are represented with "{field}_out_first/last"
  and sequence helper items. Note that the produced edges connect the parent
  node to each of the child helper nodes, since those exist between the parent
  node and the child AST nodes. This is done so that the edges for the field
  name and the index align with each other (i.e. connect the same pair of
  nodes), to make it easier to learn useful embeddings.

  Args:
    graph: Automaton graph to use.
    max_child_count: Maximum number of children that get explicit nth-child
      edges.

  Returns:
    List of CHILD_INDEX_* edges.
  """
  edges = []
  for parent_id, node_info in graph.items():
    for out_type, destinations in node_info.out_edges.items():
      if not out_type.endswith("_out_first"):
        continue
      # Sequence field: walk the linked list of helper nodes, emitting one
      # indexed edge per position until we hit the sentinel or the cap.
      current, = destinations
      index = 0
      while (index < max_child_count and
             not current.in_edge.endswith("_missing")):
        edges.append((parent_id, current.node_id, f"CHILD_INDEX_{index}"))
        # Follow the chain of next pointers.
        current, = graph[current.node_id].out_edges["next_out"]
        index += 1
  return edges
def encode_edges(
    edges,
    edge_types,
    skip_unknown = False
):
  """Converts each string edge type into an index in `edge_types`.

  Args:
    edges: Edges to encode.
    edge_types: Ordered list of types, used to determine the integer indices.
    skip_unknown: Whether to ignore edge types that aren't in the ordered list.

  Returns:
    Encoded edges.

  Raises:
    KeyError: if skip_unknown=False and an edge type isn't recognized.
  """
  index_of = {name: index for index, name in enumerate(edge_types)}
  # A collision in the mapping means the input list contained duplicates.
  if len(index_of) != len(edge_types):
    raise ValueError(f"Duplicate values in edge type list {edge_types}")
  encoded = []
  for source, dest, edge_type in edges:
    if edge_type not in index_of:
      if skip_unknown:
        continue
      raise KeyError(f"Unrecognized edge type {edge_type}")
    encoded.append((source, dest, index_of[edge_type]))
  return encoded
|
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
"""
config.py
Configuration settings and storage.
"""
import logging
import collections
import ConfigParser
from locale import getdefaultlocale
# System locale encoding, used to encode/decode unicode config values;
# fall back to UTF-8 when the locale reports none.
DEF_ENC = getdefaultlocale()[1]
if DEF_ENC is None:
    DEF_ENC = "UTF-8"
import directories
import weakref
log = logging.getLogger(__name__)
class Config(object):
    """Typed wrapper around a ConfigParser.RawConfigParser (Python 2).

    Reads mcedit.ini, migrates settings from older versions, and exposes each
    section defined in `config_definitions` as an attribute (e.g.
    ``config.keys``) holding a ConfigSection.
    """

    def __init__(self, config_definitions):
        log.info("Loading config...")
        # ConfigDict preserves insertion order so the ini file round-trips
        # with sections/options in their original order.
        self.config = ConfigParser.RawConfigParser([], ConfigDict)
        # Maps (section, option) -> {(weakref(target), attr): callback};
        # see ConfigValue.addObserver/_notifyObservers.
        self.config.observers = {}
        try:
            self.config.read(self.getPath())
        except Exception as e:
            log.warn("Error while reading configuration file mcedit.ini: {0}".format(e))
        # Migrate settings written by older MCEdit versions before building
        # the typed sections.
        self.transformConfig()
        self._sections = {}
        for (sectionKey, sectionName), items in config_definitions.iteritems():
            self._sections[sectionKey] = ConfigSection(self.config, sectionName, items)
            # Also expose the section for dotted access (config.keys, ...).
            setattr(self, sectionKey, self._sections[sectionKey])
        self.save()

    def __getitem__(self, section):
        # Dict-style access by section key, e.g. config["keys"].
        return self._sections[section]

    @staticmethod
    def getPath():
        # Location of mcedit.ini, resolved by the directories module.
        return directories.configFilePath

    @staticmethod
    def transformKey(value, n=0):
        """Normalize a pre-1.1.2.0 key-binding string to the current format.

        `n` is the ordinal of the binding being migrated; bindings from index
        36 onward gain an implicit "Ctrl-" prefix.
        """
        # Strip the old "left "/"right " modifier prefixes.
        if 'left' in value and len(value) > 5:
            value = value[5:]
        elif 'right' in value and len(value) > 6:
            value = value[6:]
        # Capitalize letter keys (lexicographic range check — single
        # lower-case letters fall inside 'a'..'z').
        if 'a' <= value <= 'z':
            value = value.replace(value[0], value[0].upper(), 1)
        if n >= 36 and "Ctrl-" not in value:
            value = "Ctrl-" + value
        # Rename mouse buttons to their current display names.
        if value == "Mouse3":
            value = "Button 3"
        elif value == "Mouse4":
            value = "Scroll Up"
        elif value == "Mouse5":
            value = "Scroll Down"
        elif value == "Mouse6":
            value = "Button 4"
        elif value == "Mouse7":
            value = "Button 5"
        return value

    @staticmethod
    def convert(key):
        # Camel-case a display name: "block buffer" -> "blockBuffer".
        vals = key.replace('-', ' ').translate(None, '()').lower().split(' ')
        return vals[0] + "".join(x.title() for x in vals[1:])

    def reset(self):
        # Drop every stored section (used when the version stamp is missing).
        for section in self.config.sections():
            self.config.remove_section(section)

    def transformConfig(self):
        """Upgrade an ini written by an older version, or reset it entirely."""
        if self.config.has_section("Version") and self.config.has_option("Version", "version"):
            version = self.config.get("Version", "version")
        else:
            # No version stamp: the config is unusable, start fresh.
            self.reset()
            return
        if version == "1.1.1.1":
            n = 1
            for (name, value) in self.config.items("Keys"):
                if name != "Swap View" and name != "Toggle Fps Counter":
                    self.config.set("Keys", name, self.transformKey(value, n))
                elif name == "Swap View":
                    # "Swap View" was renamed to "View Distance".
                    self.config.set("Keys", "View Distance", self.transformKey(value, n))
                    self.config.set("Keys", "Swap View", "None")
                elif name == "Toggle Fps Counter":
                    # "Toggle Fps Counter" was renamed to "Debug Overlay".
                    self.config.set("Keys", "Debug Overlay", self.transformKey(value, n))
                    self.config.set("Keys", "Toggle Fps Counter", "None")
                n += 1
            # Distinguish configs that kept the old Space brake binding.
            if self.config.get("Keys", "Brake") == "Space":
                version = "1.1.2.0-update"
            else:
                version = "1.1.2.0-new"
        self.config.set("Version", "version", version)
        self.save()

    def save(self):
        # Persist the whole parser state back to mcedit.ini; failures are
        # logged but never raised (best-effort save).
        try:
            cf = file(self.getPath(), 'w')
            self.config.write(cf)
            cf.close()
        except Exception as e:
            log.error("Error saving configuration settings to mcedit.ini: {0}".format(e))
class ConfigSection(object):
    """One named section of the ini file plus its typed value definitions."""

    def __init__(self, config, section, items):
        self.section = section
        if not config.has_section(section):
            config.add_section(section)
        self._items = {}
        for item in items:
            if isinstance(item, ConfigValue):
                # Pre-built value objects (e.g. ColorValue) are used as-is.
                value = item
            elif type(item[2]) in ListValue.allowedTypes:
                # list/tuple defaults become ListValues.
                value = ListValue(item[0], item[1], item[2])
            else:
                value = ConfigValue(item[0], item[1], item[2])
            # Inject the parser and section; ConfigValue relies on these.
            value.config = config
            value.section = section
            self._items[value.key] = value
            # Force a read so missing options are written with their default.
            value.get()
            # Legacy fixup: old configs stored the key name "Delete"; the
            # current UI expects "Del".
            if value.section == "Keys" and value.config.get(value.section, value.name) == "Delete":
                value.config.set(value.section, value.name, "Del")

    def __getitem__(self, key):
        return self._items[key]

    def __getattr__(self, key):
        # Attribute access falls through to the item dict, enabling
        # e.g. config.keys.forward.
        return self.__getitem__(key)

    def items(self):
        # (display name, current value) pairs for every option in the section.
        return [(i.name, i.get()) for k, i in self._items.iteritems()]
class ConfigValue(object):
    """A single typed option stored in one section of the ini file.

    Instances are created by ConfigSection, which also injects the `config`
    (the RawConfigParser) and `section` attributes after construction.
    """
    # Types a scalar option may hold; the stored string is coerced back
    # through this type on every read. (Python 2: basestring/unicode.)
    allowedTypes = [int, float, bool, basestring, str, unicode]

    def __init__(self, key, name, default=None):
        # Two-argument form (key, default): the ini option name is the key.
        if default is None:
            default = name
            name = key
        self.key = key
        self.name = name
        self.default = default
        # The default's type defines the option's type.
        self.type = type(default)
        if self.type not in self.allowedTypes:
            raise TypeError("Invalid config type %s" % repr(self.type))

    def get(self):
        """Read the option, coerced to self.type.

        On any failure (missing option, parse error, ...) the default is
        written back to the file and returned instead.
        """
        try:
            if self.type is bool:
                # getboolean accepts yes/no/true/false/on/off/0/1 spellings.
                return self.config.getboolean(self.section, self.name)
            if self.type is unicode:
                # Stored bytes are decoded with the system locale encoding.
                return self.type(self.config.get(self.section, self.name).decode(DEF_ENC))
            return self.type(self.config.get(self.section, self.name))
        except:
            if self.default is None:
                raise
            self.set(self.default)
            return self.default

    def getRaw(self):
        # Uncoerced string exactly as stored in the ini file.
        return self.config.get(self.section, self.name)

    def _setter(self, setter):
        # Wrap a user-provided property setter so it also persists the value.
        def _s(s, value):
            if setter is not None:
                setter(s, value)
            return self.set(value)
        return _s

    def set(self, value):
        """Store the value (stringified) and notify registered observers."""
        log.debug("Property Change: %15s %30s = %s", self.section, self.name, value)
        if self.type is unicode and isinstance(value, unicode):
            value = value.encode(DEF_ENC)
        self.config.set(self.section, self.name, str(value))
        self._notifyObservers(value)

    def addObserver(self, target, attr=None, callback=None):
        """ Register 'target' for changes in the config var named by section and name.
        When the config is changed, calls setattr with target and attr.
        attr may be None; it will be created from the name by lowercasing the first
        word, uppercasing the rest, and removing spaces.
        e.g. "block buffer" becomes "blockBuffer"
        """
        observers = self.config.observers.setdefault((self.section, self.name), {})
        if not attr:
            attr = self.key
        log.debug("Subscribing %s.%s", target, attr)
        attr = intern(attr)
        # Hold the target weakly so observing does not keep it alive.
        targetref = weakref.ref(target)
        observers.setdefault((targetref, attr), callback)
        # Push the current value to the observer immediately.
        val = self.get()
        setattr(target, attr, val)
        if callback:
            callback(val)

    def _notifyObservers(self, value):
        # Push the new value to every live observer; observers whose target
        # has been garbage collected are silently dropped.
        observers = self.config.observers.get((self.section, self.name), {})
        newObservers = {}
        for targetref, attr in observers:
            target = targetref()
            if target:
                log.debug("Notifying %s", target)
                setattr(target, attr, value)
                callback = observers[targetref, attr]
                if callback:
                    callback(value)
                newObservers[targetref, attr] = callback
        self.config.observers[(self.section, self.name)] = newObservers

    def property(self, setter=None):
        """Build a Python property object backed by this config value."""
        assert self.default is not None
        this = self
        def _getter(self):
            return this.get()
        return property(_getter, self._setter(setter), None)

    def __repr__(self):
        return "<%s>" % " ".join((
            self.__class__.__name__,
            "section=%r" % self.section,
            "key=%r" % self.key,
            "name=%r" % self.name,
            "default=%s" % self.default,
            "type=%s" % self.type
        ))

    def __int__(self):
        return int(self.get())

    def __float__(self):
        return float(self.get())

    def __bool__(self):
        return bool(self.get())
class ListValue(ConfigValue):
    """A list- or tuple-typed option stored as a comma-separated string.

    Each element is coerced back through the type of the first default
    element (`subtype`) on every read.
    """
    allowedTypes = [list, tuple]

    def __init__(self, key, name, default=None):
        # An empty default would leave the element subtype undefined.
        if default is None or len(default) < 1:
            raise ValueError("Default value %s is empty." % repr(default))
        self.subtype = type(default[0])
        super(ListValue, self).__init__(key, name, default)

    def get(self):
        """Parse the stored string back into self.type.

        Falls back to (and persists) the default on any failure, mirroring
        ConfigValue.get.
        """
        try:
            # BUG FIX: this previously read `self._config`, but the attribute
            # injected by ConfigSection (and used by ConfigValue.get) is
            # `config` — the AttributeError was swallowed by the bare except,
            # so every list read silently returned the default.
            raw = self.config.get(self.section, self.name)
            # Strip surrounding brackets/parens and split on commas.
            return self.type(self.subtype(x.strip())
                             for x in raw.translate(None, '[]()').split(','))
        except:
            if self.default is None:
                raise
            self.set(self.default)
            return self.default

    def __repr__(self):
        return "<%s>" % " ".join((
            self.__class__.__name__,
            "section=%r" % self.section,
            "key=%r" % self.key,
            "name=%r" % self.name,
            "default=%s" % self.default,
            "type=%s" % self.type,
            "subtype=%s" % self.subtype
        ))
class ColorValue(ListValue):
    """A color option: a fixed tuple of float channels clamped to [0, 1]."""
    allowedTypes = [tuple]
    # Registry of every constructed color value, keyed by display name.
    defaultColors = {}

    def __init__(self, key, name, default=None):
        super(ColorValue, self).__init__(key, name, default)
        ColorValue.defaultColors[name] = self

    def get(self):
        # Clamp each channel into the [0.0, 1.0] range.
        clamped = []
        for channel in super(ColorValue, self).get():
            if channel > 1.0:
                channel = 1.0
            elif channel < 0.0:
                channel = 0.0
            clamped.append(channel)
        return tuple(clamped)
class ConfigDict(collections.MutableMapping):
    """Mapping that remembers the order in which keys were first inserted.

    Used as the storage type for RawConfigParser so sections and options are
    written back to the ini file in their original order.
    """

    def __init__(self, *args, **kwargs):
        self.dict = dict(*args, **kwargs)
        # Insertion order of keys added through __setitem__.
        self.keyorder = []

    def keys(self):
        return list(self.keyorder)

    def items(self):
        return list(self.__iteritems__())

    def __iteritems__(self):
        return ((key, self.dict[key]) for key in self.keys())

    def __iter__(self):
        return iter(self.keys())

    def __getitem__(self, key):
        return self.dict[key]

    def __setitem__(self, key, value):
        self.dict[key] = value
        if key not in self.keyorder:
            self.keyorder.append(key)

    def __delitem__(self, key):
        del self.dict[key]
        if key in self.keyorder:
            self.keyorder.remove(key)

    def __contains__(self, key):
        return key in self.dict

    def __len__(self):
        return len(self.dict)

    def copy(self):
        duplicate = ConfigDict()
        duplicate.dict = self.dict.copy()
        duplicate.keyorder = list(self.keyorder)
        return duplicate
# Quick Reference:
# 7 Bedrock
# 9 Still_Water
# 11 Still_Lava
# 14 Gold_Ore
# 15 Iron_Ore
# 16 Coal_Ore
# 21 Lapis_Lazuli_Ore
# 24 Sandstone
# 49 Obsidian
# 56 Diamond_Ore
# 73 Redstone_Ore
# 129 Emerald_Ore
# 153 Nether_Quartz_Ore
# Ore block ids that can be individually hidden by the renderer (see the
# "Quick Reference" id list above).
hiddableOres = (7, 16, 15, 21, 73, 14, 56, 153)

# Master table of every config option, keyed by (attribute key, ini section
# name). Each entry is either a (key, display name, default) tuple or a
# pre-built ConfigValue instance; the default's type determines the value
# type (see ConfigSection.__init__).
definitions = {
    ("keys", "Keys"): [
        ("forward", "forward", "W"),
        ("back", "back", "S"),
        ("left", "left", "A"),
        ("right", "right", "D"),
        ("up", "up", "Space"),
        ("down", "down", "Shift"),
        ("brake", "brake", "C"),
        ("sprint", "sprint", "None"),
        ("rotateClone", "rotate (clone)", "E"),
        ("rollClone", "roll (clone)", "R"),
        ("flip", "flip", "F"),
        ("mirror", "mirror", "G"),
        ("rotateBrush", "rotate (brush)", "E"),
        ("rollBrush", "roll (brush)", "G"),
        ("increaseBrush", "increase brush", "R"),
        ("decreaseBrush", "decrease brush", "F"),
        ("replaceShortcut", "replace shortcut", "R"),
        ("swap", "swap", "X"),
        ("panLeft", "pan left", "J"),
        ("panRight", "pan right", "L"),
        ("panUp", "pan up", "I"),
        ("panDown", "pan down", "K"),
        ("toggleView", "toggle view", "Tab"),
        ("resetReach", "reset reach", "Button 3"),
        ("increaseReach", "increase reach", "Scroll Up"),
        ("decreaseReach", "decrease reach", "Scroll Down"),
        ("confirmConstruction", "confirm construction", "Return"),
        ("openLevel", "open level", "O"),
        ("newLevel", "new level", "N"),
        ("deleteBlocks", "delete blocks", "Del"),
        ("lineTool", "line tool", "Z"),
        ("longDistanceMode", "long-distance mode", "Alt-Z"),
        ("flyMode", "fly mode", "None"),
        ("debugOverlay", "debug overlay", "0"),
        ("showBlockInfo", "show block info", "Alt"),
        ("pickBlock", "pick block", "Alt"),
        ("selectChunks", "select chunks", "Z"),
        ("deselectChunks", "deselect chunks", "Alt"),
        ("brushLineTool", "brush line tool", "Z"),
        ("snapCloneToAxis", "snap clone to axis", "Ctrl"),
        ("blocksOnlyModifier", "blocks-only modifier", "Alt"),
        ("fastIncrementModifierHold", "fast increment modifier", "Ctrl"),
        ("fastNudge", "fast nudge", "None"),
        ("takeAScreenshot", "take a screenshot", "F6"),
        ("quit", "quit", "Ctrl-Q"),
        ("viewDistance", "view distance", "Ctrl-F"),
        ("selectAll", "select all", "Ctrl-A"),
        ("deselect", "deselect", "Ctrl-D"),
        ("cut", "cut", "Ctrl-X"),
        ("copy", "copy", "Ctrl-C"),
        ("paste", "paste", "Ctrl-V"),
        ("reloadWorld", "reload world", "Ctrl-R"),
        ("open", "open", "Ctrl-O"),
        ("quickLoad", "quick load", "Ctrl-L"),
        ("undo", "undo", "Ctrl-Z"),
        ("redo", "redo", "Ctrl-Y"),
        ("save", "save", "Ctrl-S"),
        ("saveAs", "save as", "Ctrl-Alt-S"),
        ("newWorld", "new world", "Ctrl-N"),
        ("closeWorld", "close world", "Ctrl-W"),
        ("worldInfo", "world info", "Ctrl-I"),
        ("gotoPanel", "goto panel", "Ctrl-G"),
        ("exportSelection", "export selection", "Ctrl-E"),
        ("toggleRenderer", "toggle renderer", "Ctrl-M"),
        ("uploadWorld", "upload world", "Ctrl-U"),
        # Tool hotkeys (number row).
        ("select", "select", "1"),
        ("brush", "brush", "2"),
        ("clone", "clone", "3"),
        ("fillAndReplace", "fill and replace", "4"),
        ("filter", "filter", "5"),
        ("importKey", "import", "6"),
        ("players", "players", "7"),
        ("worldSpawnpoint", "world spawnpoint", "8"),
        ("chunkControl", "chunk control", "9"),
        ("nbtExplorer", "nbt explorer", "None"),
    ],
    ("version", "Version"): [
        ("version", "version", "1.1.2.0")
    ],
    ("settings", "Settings"): [
        ("flyMode", "Fly Mode", False),
        ("enableMouseLag", "Enable Mouse Lag", False),
        ("longDistanceMode", "Long Distance Mode", False),
        ("shouldResizeAlert", "Window Size Alert", True),
        ("closeMinecraftWarning", "Close Minecraft Warning", True),
        ("skin", "MCEdit Skin", "[Current]"),
        ("fov", "Field of View", 70.0),
        ("spaceHeight", "Space Height", 64),
        ("blockBuffer", "Block Buffer", 256 * 1048576),
        ("reportCrashes", "report crashes new", False),
        ("reportCrashesAsked", "report crashes asked", False),
        ("staticCommandsNudge", "Static Coords While Nudging", False),
        ("moveSpawnerPosNudge", "Change Spawners While Nudging", False),
        ("rotateBlockBrush", "rotateBlockBrushRow", True),
        ("langCode", "Language String", "en_US"),
        ("viewDistance", "View Distance", 8),
        ("targetFPS", "Target FPS", 30),
        ("windowWidth", "window width", 1152),
        ("windowHeight", "window height", 864),
        ("windowMaximized", "window maximized", False),
        ("windowMaximizedHeight", "window maximized height", 0),
        ("windowMaximizedWidth", "window maximized width", 0),
        ("windowX", "window x", 0),
        ("windowY", "window y", 0),
        ("windowShowCmd", "window showcmd", 1),
        ("setWindowPlacement", "SetWindowPlacement", True),
        ("showHiddenOres", "show hidden ores", False),
        ("hiddableOres", "hiddable ores", hiddableOres),
    ] + [
        # One boolean toggle per hiddable ore id.
        ("showOre%s" % i, "show ore %s" % i, True) for i in hiddableOres
    ] + [
        ("fastLeaves", "fast leaves", True),
        ("roughGraphics", "rough graphics", False),
        ("showChunkRedraw", "show chunk redraw", True),
        ("drawSky", "draw sky", True),
        ("drawFog", "draw fog", True),
        ("showCeiling", "show ceiling", True),
        ("drawEntities", "draw entities", True),
        ("drawMonsters", "draw monsters", True),
        ("drawItems", "draw items", True),
        ("drawTileEntities", "draw tile entities", True),
        ("drawTileTicks", "draw tile ticks", False),
        ("drawUnpopulatedChunks", "draw unpopulated chunks", True),
        ("drawChunkBorders", "draw chunk borders", False),
        ("vertexBufferLimit", "vertex buffer limit", 384),
        ("vsync", "vertical sync", 0),
        ("viewMode", "View Mode", "Camera"),
        ("undoLimit", "Undo Limit", 20),
        ("recentWorlds", "Recent Worlds", ['']),
        ("resourcePack", "Resource Pack", u"Default"),
        ("maxCopies", "Copy stack size", 32),
        ("superSecretSettings", "Super Secret Settings", False),
        ("compassToggle", "Compass Toggle", True),
        ("compassSize", "Compass Size", 60),
        ("fogIntensity", "Fog Intensity", 20),
        ("fontProportion", "Fonts Proportion", 100),
        ("downloadPlayerSkins", "Download Player Skins", True),
        ("maxViewDistance", "Max View Distance", 32),
        ("drawPlayerHeads", "Draw Player Heads", True),
        ("showQuickBlockInfo", "Show Block Info when hovering", True),
        ("savePositionOnClose", "Save camera position on close", False),
        ("showWindowSizeWarning", "Show window size warning", True)
    ],
    ("controls", "Controls"): [
        ("mouseSpeed", "mouse speed", 5.0),
        ("cameraAccel", "camera acceleration", 125.0),
        ("cameraDrag", "camera drag", 100.0),
        ("cameraMaxSpeed", "camera maximum speed", 60.0),
        ("cameraBrakingSpeed", "camera braking speed", 8.0),
        ("invertMousePitch", "invert mouse pitch", False),
        ("autobrake", "autobrake", True),
        ("swapAxes", "swap axes looking down", False)
    ],
    ("brush", "Brush"): [
        ("brushSizeL", "Brush Shape L", 3),
        ("brushSizeH", "Brush Shape H", 3),
        ("brushSizeW", "Brush Shape W", 3),
        ("updateBrushOffset", "Update Brush Offset", False),
        ("chooseBlockImmediately", "Choose Block Immediately", False),
        ("alpha", "Alpha", 0.66)
    ],
    ("clone", "Clone"): [
        ("copyAir", "Copy Air", True),
        ("copyWater", "Copy Water", True),
        ("copyBiomes", "Copy Biomes", False),
        ("staticCommands", "Change Coordinates", False),
        ("moveSpawnerPos", "Change Spawners Pos", False),
        ("regenerateUUID", "Regenerate UUIDs", True),
        ("placeImmediately", "Place Immediately", True)
    ],
    ("fill", "Fill"): [
        ("chooseBlockImmediately", "Choose Block Immediately", True),
        ("chooseBlockImmediatelyReplace", "Choose Block Immediately for Replace", True)
    ],
    ("spawn", "Spawn"): [
        ("spawnProtection", "Spawn Protection", True)
    ],
    ("selection", "Selection"): [
        ("showPreviousSelection", "Show Previous Selection", True),
        ("color", "Color", "white")
    ],
    # Pre-built ColorValue instances; these also register themselves in
    # ColorValue.defaultColors.
    ("selectionColors", "Selection Colors"): [
        ColorValue("white", "white", (1.0, 1.0, 1.0)),
        ColorValue("blue", "blue", (0.75, 0.75, 1.0)),
        ColorValue("green", "green", (0.75, 1.0, 0.75)),
        ColorValue("red", "red", (1.0, 0.75, 0.75)),
        ColorValue("teal", "teal", (0.75, 1.0, 1.0)),
        ColorValue("pink", "pink", (1.0, 0.75, 1.0)),
        ColorValue("yellow", "yellow", (1.0, 1.0, 0.75)),
        ColorValue("grey", "grey", (0.6, 0.6, 0.6)),
        ColorValue("black", "black", (0.0, 0.0, 0.0))
    ],
    ("fastNudgeSettings", "Fast Nudge Settings"): [
        ("blocksWidth", "Blocks Width", False),
        ("blocksWidthNumber", "Blocks Width Number", 16),
        ("selectionWidth", "Selection Width", False),
        ("selectionWidthNumber", "Selection Width Number", 16),
        ("pointsWidth", "Points Width", False),
        ("pointsWidthNumber", "Points Width Number", 16),
        ("cloneWidth", "clone Width", True),
        ("cloneWidthNumber", "Clone Width Number", 16),
        ("importWidth", "Import Width", False),
        ("importWidthNumber", "Import Width Number", 8),
    ],
    ("nbtTreeSettings", "NBT Tree Settings"): [
        ("useBulletStyles", "Use Bullet Styles", True),
        ("useBulletText", "Use Bullet Text", False),
        ("useBulletImages", "Use Bullet Images", True),
        ("defaultBulletImages", "Default Bullet Images", True),
        ("bulletFileName", "Bullet Images File", directories.os.path.join(directories.getDataDir(), 'Nbtsheet.png')),
        ("showAllTags", "Show all the tags in the tree", False),
    ],
    # Populated at runtime by the filter tool.
    ("Filter Keys", "Filter Keys"): [],
    ("session", "Session",): [
        ("override", "Override", False)
    ],
    ("commands", "Commands"): [
        ("sorting", "Sorting", "chain"),
        ("space", "Space", True),
        ("fileFormat", "File Format", "txt")
    ],
    ("schematicCopying", "Schematics Copying"): [
        ("cancelCommandBlockOffset", "Cancel Command Block Offset", False)
    ]
}
# Module-level singleton, created once at import time. (The previous
# `config = None` followed by `if config is None:` guard was dead code:
# the condition was always true immediately after the assignment.)
config = Config(definitions)
|
|
from __future__ import absolute_import, division, print_function
import math
import re
from operator import getitem
from .compatibility import unicode
from .context import _globals
from .core import add, inc # noqa: F401
from .core import (istask, get_dependencies, subs, toposort, flatten,
reverse_dict, ishashable)
def cull(dsk, keys):
    """ Return new dask with only the tasks required to calculate keys.

    In other words, remove unnecessary tasks from dask.
    ``keys`` may be a single key or list of keys.

    Examples
    --------
    >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
    >>> dsk, dependencies = cull(d, 'out')  # doctest: +SKIP
    >>> dsk  # doctest: +SKIP
    {'x': 1, 'out': (add, 'x', 10)}
    >>> dependencies  # doctest: +SKIP
    {'x': set(), 'out': set(['x'])}

    Returns
    -------
    dsk: culled dask graph
    dependencies: Dict mapping {key: [deps]}.  Useful side effect to accelerate
        other optimizations, notably fuse.
    """
    if not isinstance(keys, (list, set)):
        keys = [keys]

    out_keys = []
    seen = set()
    dependencies = dict()

    # Breadth-first walk from the requested keys through their dependencies.
    frontier = list(set(flatten(keys)))
    while frontier:
        out_keys += frontier
        next_frontier = []
        for key in frontier:
            deplist = get_dependencies(dsk, key, as_list=True)  # fuse needs lists
            dependencies[key] = deplist
            for dep in deplist:
                if dep not in seen:
                    seen.add(dep)
                    next_frontier.append(dep)
        frontier = next_frontier

    culled = {key: dsk[key] for key in out_keys}
    return culled, dependencies
def default_fused_linear_keys_renamer(keys):
    """Create new keys for fused tasks.

    Joins the key names from descendant to ancestor with '-'; returns None
    for key types that have no obvious renaming.
    """
    key_type = type(keys[0])
    if key_type is str or key_type is unicode:
        # Plain string keys: join split names, ending with the full first key.
        parts = [key_split(key) for key in keys[:0:-1]]
        parts.append(keys[0])
        return '-'.join(parts)
    if (key_type is tuple and len(keys[0]) > 0 and
            isinstance(keys[0][0], (str, unicode))):
        # Tuple keys like (name, index): rename the name, keep the indices.
        parts = [key_split(key) for key in keys[:0:-1]]
        parts.append(keys[0][0])
        return ('-'.join(parts),) + keys[0][1:]
    return None
def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True):
    """ Return new dask graph with linear sequence of tasks fused together.

    If specified, the keys in ``keys`` keyword argument are *not* fused.
    Supply ``dependencies`` from output of ``cull`` if available to avoid
    recomputing dependencies.

    **This function is mostly superseded by ``fuse``**

    Parameters
    ----------
    dsk: dict
    keys: list
    dependencies: dict, optional
        {key: [list-of-keys]}.  Must be a list to provide count of each key
        This optional input often comes from ``cull``
    rename_keys: bool or func, optional
        Whether to rename fused keys with ``default_fused_linear_keys_renamer``
        or not.  Renaming fused keys can keep the graph more understandable
        and comprehensive, but it comes at the cost of additional processing.
        If False, then the top-most key will be used.  For advanced usage, a
        func is also accepted, ``new_key = rename_keys(fused_key_list)``.

    Examples
    --------
    >>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
    >>> dsk, dependencies = fuse(d)
    >>> dsk  # doctest: +SKIP
    {'a-b-c': (inc, (inc, 1)), 'c': 'a-b-c'}
    >>> dsk, dependencies = fuse(d, rename_keys=False)
    >>> dsk  # doctest: +SKIP
    {'c': (inc, (inc, 1))}
    >>> dsk, dependencies = fuse(d, keys=['b'], rename_keys=False)
    >>> dsk  # doctest: +SKIP
    {'b': (inc, 1), 'c': (inc, 'b')}

    Returns
    -------
    dsk: output graph with keys fused
    dependencies: dict mapping dependencies after fusion.  Useful side effect
        to accelerate other downstream optimizations.
    """
    if keys is not None and not isinstance(keys, set):
        if not isinstance(keys, list):
            keys = [keys]
        keys = set(flatten(keys))

    if dependencies is None:
        dependencies = {k: get_dependencies(dsk, k, as_list=True)
                        for k in dsk}

    # locate all members of linear chains
    # A child is fusible only if it has exactly one parent (it stays in
    # child2parent); any second sighting moves it to `unfusible`.
    child2parent = {}
    unfusible = set()
    for parent in dsk:
        deps = dependencies[parent]
        has_many_children = len(deps) > 1
        for child in deps:
            if keys is not None and child in keys:
                # Protected keys must survive fusion untouched.
                unfusible.add(child)
            elif child in child2parent:
                # Seen a second parent: not a linear link.
                del child2parent[child]
                unfusible.add(child)
            elif has_many_children:
                unfusible.add(child)
            elif child not in unfusible:
                child2parent[child] = parent

    # construct the chains from ancestor to descendant
    chains = []
    parent2child = dict(map(reversed, child2parent.items()))
    while child2parent:
        child, parent = child2parent.popitem()
        chain = [child, parent]
        # Extend the chain upward toward the root...
        while parent in child2parent:
            parent = child2parent.pop(parent)
            del parent2child[parent]
            chain.append(parent)
        chain.reverse()
        # ...then downward toward the leaf.
        while child in parent2child:
            child = parent2child.pop(child)
            del child2parent[child]
            chain.append(child)
        chains.append(chain)

    # From here on dependencies are sets (lists were only needed for counts).
    dependencies = {k: set(v) for k, v in dependencies.items()}

    if rename_keys is True:
        key_renamer = default_fused_linear_keys_renamer
    elif rename_keys is False:
        key_renamer = None
    else:
        key_renamer = rename_keys

    # create a new dask with fused chains
    rv = {}
    fused = set()
    aliases = set()
    is_renamed = False
    for chain in chains:
        if key_renamer is not None:
            new_key = key_renamer(chain)
            # Only rename when the new key would not collide with an
            # existing or already-produced key.
            is_renamed = (new_key is not None and new_key not in dsk and
                          new_key not in rv)
        # Fold the chain bottom-up, substituting each child task into its
        # parent's task expression.
        child = chain.pop()
        val = dsk[child]
        while chain:
            parent = chain.pop()
            dependencies[parent].update(dependencies.pop(child))
            dependencies[parent].remove(child)
            val = subs(dsk[parent], child, val)
            fused.add(child)
            child = parent
        fused.add(child)
        if is_renamed:
            # Keep the original top key as an alias pointing at the new key.
            rv[new_key] = val
            rv[child] = new_key
            dependencies[new_key] = dependencies[child]
            dependencies[child] = {new_key}
            aliases.add(child)
        else:
            rv[child] = val
    # Copy over every task that was not part of a fused chain.
    for key, val in dsk.items():
        if key not in fused:
            rv[key] = val
    if aliases:
        # Point all remaining references directly at the renamed keys, then
        # drop alias entries that are not explicitly requested outputs.
        for key, deps in dependencies.items():
            for old_key in deps & aliases:
                new_key = rv[old_key]
                deps.remove(old_key)
                deps.add(new_key)
                rv[key] = subs(rv[key], old_key, new_key)
        if keys is not None:
            for key in aliases - keys:
                del rv[key]
                del dependencies[key]
    return rv, dependencies
def _flat_set(x):
    """Coerce ``x`` (None, a scalar, a list, or a set) into a set."""
    if x is None:
        return set()
    if isinstance(x, set):
        return x
    if isinstance(x, list):
        return set(x)
    # Any other value is treated as a single element.
    return set([x])
def inline(dsk, keys=None, inline_constants=True, dependencies=None):
    """ Return new dask with the given keys inlined with their values.
    Inlines all constants if ``inline_constants`` keyword is True. Note that
    the constant keys will remain in the graph, to remove them follow
    ``inline`` with ``cull``.
    Examples
    --------
    >>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')}
    >>> inline(d)  # doctest: +SKIP
    {'x': 1, 'y': (inc, 1), 'z': (add, 1, 'y')}
    >>> inline(d, keys='y')  # doctest: +SKIP
    {'x': 1, 'y': (inc, 1), 'z': (add, 1, (inc, 1))}
    >>> inline(d, keys='y', inline_constants=False)  # doctest: +SKIP
    {'x': 1, 'y': (inc, 1), 'z': (add, 'x', (inc, 'x'))}
    """
    # Dependencies may arrive as lists (e.g. from ``cull``); convert to sets.
    if dependencies and isinstance(next(iter(dependencies.values())), list):
        dependencies = {k: set(v) for k, v in dependencies.items()}
    keys = _flat_set(keys)
    if dependencies is None:
        dependencies = {k: get_dependencies(dsk, k) for k in dsk}

    if inline_constants:
        keys.update(k for k, v in dsk.items()
                    if (ishashable(v) and v in dsk)
                    or (not dependencies[k] and not istask(v)))

    # Keys may depend on other keys, so substitute in topological order;
    # the values stored in ``resolved`` do not contain other inlined keys.
    order = toposort({k: dsk[k] for k in keys if k in dsk},
                     dependencies=dependencies)
    resolved = {}
    for key in order:
        task = dsk[key]
        for dep in keys & dependencies[key]:
            task = subs(task, dep, resolved[dep] if dep in resolved else dsk[dep])
        resolved[key] = task

    # Build the output graph with every substitution applied.
    out = dict(resolved)
    for key, task in dsk.items():
        if key not in out:
            for dep in keys & dependencies[key]:
                task = subs(task, dep, resolved[dep])
            out[key] = task
    return out
def inline_functions(dsk, output, fast_functions=None, inline_constants=False,
                     dependencies=None):
    """ Inline cheap functions into larger operations
    Examples
    --------
    >>> dsk = {'out': (add, 'i', 'd'),  # doctest: +SKIP
    ...        'i': (inc, 'x'),
    ...        'd': (double, 'y'),
    ...        'x': 1, 'y': 1}
    >>> inline_functions(dsk, [], [inc])  # doctest: +SKIP
    {'out': (add, (inc, 'x'), 'd'),
     'd': (double, 'y'),
     'x': 1, 'y': 1}
    Protect output keys. In the example below ``i`` is not inlined because it
    is marked as an output key.
    >>> inline_functions(dsk, ['i', 'out'], [inc, double])  # doctest: +SKIP
    {'out': (add, 'i', (double, 'y')),
     'i': (inc, 'x'),
     'x': 1, 'y': 1}
    """
    if not fast_functions:
        return dsk
    output = set(output)
    fast_functions = set(fast_functions)
    if dependencies is None:
        dependencies = {k: get_dependencies(dsk, k) for k in dsk}
    dependents = reverse_dict(dependencies)

    def _inlinable(key, task):
        # A task is inlinable when it only calls "fast" functions, has at
        # least one dependent, and is not a protected output key.
        return (istask(task)
                and functions_of(task).issubset(fast_functions)
                and dependents[key]
                and key not in output)

    candidates = [k for k, v in dsk.items() if _inlinable(k, v)]
    if candidates:
        dsk = inline(dsk, candidates, inline_constants=inline_constants,
                     dependencies=dependencies)
        for k in candidates:
            del dsk[k]
    return dsk
def unwrap_partial(func):
    """Follow ``.func`` attributes (e.g. ``functools.partial`` layers) down
    to the innermost callable."""
    result = func
    while hasattr(result, 'func'):
        result = result.func
    return result
def functions_of(task):
    """ Set of functions contained within nested task
    Examples
    --------
    >>> task = (add, (mul, 1, 2), (inc, 3))  # doctest: +SKIP
    >>> functions_of(task)  # doctest: +SKIP
    set([add, mul, inc])
    """
    found = set()
    stack = [task]
    containers = (list, tuple)
    while stack:
        current = stack.pop()
        # Only exact lists/tuples are traversed (no subclasses), matching
        # how dask tasks are represented.
        if type(current) in containers:
            if istask(current):
                found.add(unwrap_partial(current[0]))
                stack.extend(current[1:])
            else:
                stack.extend(current)
    return found
def fuse_selections(dsk, head1, head2, merge):
    """Fuse selections with lower operation.
    Handles graphs of the form:
    ``{key1: (head1, key2, ...), key2: (head2, ...)}``
    Parameters
    ----------
    dsk : dict
        dask graph
    head1 : function
        The first element of task1
    head2 : function
        The first element of task2
    merge : function
        Takes ``task1`` and ``task2`` and returns a merged task to
        replace ``task1``.
    Examples
    --------
    >>> def load(store, partition, columns):
    ...     pass
    >>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
    ...        'y': (getitem, 'x', 'a')}
    >>> merge = lambda t1, t2: (load, t2[1], t2[2], t1[2])
    >>> dsk2 = fuse_selections(dsk, getitem, load, merge)
    >>> cull(dsk2, 'y')[0]
    {'y': (<function load at ...>, 'store', 'part', 'a')}
    """
    fused = {}
    for key, task in dsk.items():
        new_task = task
        try:
            if (istask(task) and task[0] == head1 and task[1] in dsk and
                    istask(dsk[task[1]]) and dsk[task[1]][0] == head2):
                new_task = merge(task, dsk[task[1]])
        except TypeError:
            # Unhashable/odd task shapes simply pass through unchanged.
            new_task = task
        fused[key] = new_task
    return fused
def fuse_getitem(dsk, func, place):
    """ Fuse getitem with lower operation
    Parameters
    ----------
    dsk: dict
        dask graph
    func: function
        A function in a task to merge
    place: int
        Location in task to insert the getitem key
    Examples
    --------
    >>> def load(store, partition, columns):
    ...     pass
    >>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
    ...        'y': (getitem, 'x', 'a')}
    >>> dsk2 = fuse_getitem(dsk, load, 3)  # columns in arg place 3
    >>> cull(dsk2, 'y')[0]
    {'y': (<function load at ...>, 'store', 'part', 'a')}
    """
    def _merge(getter_task, inner_task):
        # Splice the getitem index (getter_task[2]) into position ``place``
        # of the inner task.
        inner = tuple(inner_task)
        return inner[:place] + (getter_task[2],) + inner[place + 1:]

    return fuse_selections(dsk, getitem, func, _merge)
def default_fused_keys_renamer(keys):
    """Create new keys for ``fuse`` tasks.

    The fused name joins the distinct ``key_split`` names of all fused keys
    with the top-most key appended last. Returns ``None`` (implicitly) for
    key types it does not understand.
    """
    it = reversed(keys)
    top_key = next(it)
    key_type = type(top_key)
    if key_type is str or key_type is unicode:
        top_name = key_split(top_key)
        other_names = {key_split(k) for k in it}
        other_names.discard(top_name)
        parts = sorted(other_names)
        parts.append(top_key)
        return '-'.join(parts)
    if key_type is tuple and top_key and isinstance(top_key[0], (str, unicode)):
        top_name = key_split(top_key)
        other_names = {key_split(k) for k in it}
        other_names.discard(top_name)
        parts = sorted(other_names)
        parts.append(top_key[0])
        return ('-'.join(parts),) + top_key[1:]
def fuse(dsk, keys=None, dependencies=None, ave_width=None, max_width=None,
         max_height=None, max_depth_new_edges=None, rename_keys=None):
    """ Fuse tasks that form reductions; more advanced than ``fuse_linear``
    This trades parallelism opportunities for faster scheduling by making tasks
    less granular. It can replace ``fuse_linear`` in optimization passes.
    This optimization applies to all reductions--tasks that have at most one
    dependent--so it may be viewed as fusing "multiple input, single output"
    groups of tasks into a single task. There are many parameters to fine
    tune the behavior, which are described below. ``ave_width`` is the
    natural parameter with which to compare parallelism to granularity, so
    it should always be specified. Reasonable values for other parameters
    will be determined using ``ave_width`` if necessary.
    Parameters
    ----------
    dsk: dict
        dask graph
    keys: list or set, optional
        Keys that must remain in the returned dask graph
    dependencies: dict, optional
        {key: [list-of-keys]}. Must be a list to provide count of each key
        This optional input often comes from ``cull``
    ave_width: float (default 2)
        Upper limit for ``width = num_nodes / height``, a good measure of
        parallelizability
    max_width: int
        Don't fuse if total width is greater than this
    max_height: int
        Don't fuse more than this many levels
    max_depth_new_edges: int
        Don't fuse if new dependencies are added after this many levels
    rename_keys: bool or func, optional
        Whether to rename the fused keys with ``default_fused_keys_renamer``
        or not. Renaming fused keys can keep the graph more understandable
        and comprehensive, but it comes at the cost of additional processing.
        If False, then the top-most key will be used. For advanced usage, a
        function to create the new name is also accepted.
    Returns
    -------
    dsk: output graph with keys fused
    dependencies: dict mapping dependencies after fusion. Useful side effect
        to accelerate other downstream optimizations.
    """
    if keys is not None and not isinstance(keys, set):
        if not isinstance(keys, list):
            keys = [keys]
        keys = set(flatten(keys))
    # Assign reasonable, not too restrictive defaults
    if ave_width is None:
        if _globals.get('fuse_ave_width') is None:
            ave_width = 1
        else:
            ave_width = _globals['fuse_ave_width']
    if max_height is None:
        if _globals.get('fuse_max_height') is None:
            max_height = len(dsk)
        else:
            max_height = _globals['fuse_max_height']
    max_depth_new_edges = (
        max_depth_new_edges or
        _globals.get('fuse_max_depth_new_edges') or
        ave_width + 1.5
    )
    max_width = (
        max_width or
        _globals.get('fuse_max_width') or
        1.5 + ave_width * math.log(ave_width + 1)
    )
    # A zero/false width or height disables fusion entirely.
    if not ave_width or not max_height:
        return dsk, dependencies
    if rename_keys is None:
        rename_keys = _globals.get('fuse_rename_keys', True)
    if rename_keys is True:
        key_renamer = default_fused_keys_renamer
    elif rename_keys is False:
        key_renamer = None
    else:
        key_renamer = rename_keys
    if dependencies is None:
        deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}
    else:
        deps = dict(dependencies)
    # rdeps: reverse dependencies, key -> list of its dependents.
    rdeps = {}
    for k, vals in deps.items():
        for v in vals:
            if v not in rdeps:
                rdeps[v] = [k]
            else:
                rdeps[v].append(k)
        deps[k] = set(vals)  # convert to sets for the set algebra below
    # A key is "reducible" (fusible into its dependent) when it has exactly
    # one dependent; requested output keys are never reducible.
    reducible = {k for k, vals in rdeps.items() if len(vals) == 1}
    if keys:
        reducible -= keys
    if not reducible:
        return dsk, deps
    rv = dsk.copy()
    fused_trees = {}
    # These are the stacks we use to store data as we traverse the graph
    info_stack = []
    children_stack = []
    # For speed
    deps_pop = deps.pop
    reducible_add = reducible.add
    reducible_pop = reducible.pop
    reducible_remove = reducible.remove
    fused_trees_pop = fused_trees.pop
    info_stack_append = info_stack.append
    info_stack_pop = info_stack.pop
    children_stack_append = children_stack.append
    children_stack_extend = children_stack.extend
    children_stack_pop = children_stack.pop
    # Main loop: pick a reducible key, walk up to the top of its fusible
    # region, then depth-first traverse downward computing fusion metrics.
    while reducible:
        parent = reducible_pop()
        reducible_add(parent)
        while parent in reducible:
            # Go to the top
            parent = rdeps[parent][0]
        children_stack_append(parent)
        children_stack_extend(reducible & deps[parent])
        while True:
            child = children_stack[-1]
            if child != parent:
                children = reducible & deps[child]
                while children:
                    # Depth-first search
                    children_stack_extend(children)
                    parent = child
                    child = children_stack[-1]
                    children = reducible & deps[child]
                else:
                    # while-else: runs once the DFS can descend no further.
                    children_stack_pop()
                    # This is a leaf node in the reduction region
                    # key, task, fused_keys, height, width, number of nodes, fudge, set of edges
                    info_stack_append((child, rv[child], None if key_renamer is None else [child],
                                       1, 1, 1, 0, deps[child] - reducible))
            else:
                children_stack_pop()
                # Calculate metrics and fuse as appropriate
                deps_parent = deps[parent]
                edges = deps_parent - reducible
                children = deps_parent - edges
                num_children = len(children)
                if num_children == 1:
                    # Single child: possible linear fuse.
                    (child_key, child_task, child_keys, height, width, num_nodes, fudge,
                     children_edges) = info_stack_pop()
                    num_children_edges = len(children_edges)
                    if fudge > num_children_edges - 1 >= 0:
                        fudge = num_children_edges - 1
                    edges |= children_edges
                    no_new_edges = len(edges) == num_children_edges
                    if not no_new_edges:
                        fudge += 1
                    if (
                        (num_nodes + fudge) / height <= ave_width and
                        # Sanity check; don't go too deep if new levels introduce new edge dependencies
                        (no_new_edges or height < max_depth_new_edges)
                    ):
                        # Perform substitutions as we go
                        val = subs(dsk[parent], child_key, child_task)
                        deps_parent.remove(child_key)
                        deps_parent |= deps_pop(child_key)
                        del rv[child_key]
                        reducible_remove(child_key)
                        if key_renamer is not None:
                            child_keys.append(parent)
                            fused_trees[parent] = child_keys
                            fused_trees_pop(child_key, None)
                        if children_stack:
                            if no_new_edges:
                                # Linear fuse
                                info_stack_append((parent, val, child_keys, height, width, num_nodes, fudge, edges))
                            else:
                                info_stack_append((parent, val, child_keys, height + 1, width, num_nodes + 1, fudge,
                                                   edges))
                        else:
                            rv[parent] = val
                            break
                    else:
                        # Fusion rejected: restore the child as its own task.
                        rv[child_key] = child_task
                        reducible_remove(child_key)
                        if children_stack:
                            # Allow the parent to be fused, but only under strict circumstances.
                            # Ensure that linear chains may still be fused.
                            if fudge > int(ave_width - 1):
                                fudge = int(ave_width - 1)
                            # This task *implicitly* depends on `edges`
                            info_stack_append((parent, rv[parent], None if key_renamer is None else [parent],
                                               1, width, 1, fudge, edges))
                        else:
                            break
                else:
                    # Multiple children: possible reduction fuse.
                    child_keys = []
                    height = 1
                    width = 0
                    num_single_nodes = 0
                    num_nodes = 0
                    fudge = 0
                    children_edges = set()
                    max_num_edges = 0
                    children_info = info_stack[-num_children:]
                    del info_stack[-num_children:]
                    for cur_key, cur_task, cur_keys, cur_height, cur_width, cur_num_nodes, cur_fudge, \
                            cur_edges in children_info:
                        if cur_height == 1:
                            num_single_nodes += 1
                        elif cur_height > height:
                            height = cur_height
                        width += cur_width
                        num_nodes += cur_num_nodes
                        fudge += cur_fudge
                        if len(cur_edges) > max_num_edges:
                            max_num_edges = len(cur_edges)
                        children_edges |= cur_edges
                    # Fudge factor to account for possible parallelism with the boundaries
                    num_children_edges = len(children_edges)
                    fudge += min(num_children - 1, max(0, num_children_edges - max_num_edges))
                    if fudge > num_children_edges - 1 >= 0:
                        fudge = num_children_edges - 1
                    edges |= children_edges
                    no_new_edges = len(edges) == num_children_edges
                    if not no_new_edges:
                        fudge += 1
                    if (
                        (num_nodes + fudge) / height <= ave_width and
                        num_single_nodes <= ave_width and
                        width <= max_width and
                        height <= max_height and
                        # Sanity check; don't go too deep if new levels introduce new edge dependencies
                        (no_new_edges or height < max_depth_new_edges)
                    ):
                        # Perform substitutions as we go
                        val = dsk[parent]
                        children_deps = set()
                        for child_info in children_info:
                            cur_child = child_info[0]
                            val = subs(val, cur_child, child_info[1])
                            del rv[cur_child]
                            children_deps |= deps_pop(cur_child)
                            reducible_remove(cur_child)
                            if key_renamer is not None:
                                fused_trees_pop(cur_child, None)
                                child_keys.extend(child_info[2])
                        deps_parent -= children
                        deps_parent |= children_deps
                        if key_renamer is not None:
                            child_keys.append(parent)
                            fused_trees[parent] = child_keys
                        if children_stack:
                            info_stack_append((parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges))
                        else:
                            rv[parent] = val
                            break
                    else:
                        # Fusion rejected: restore all children as their own tasks.
                        for child_info in children_info:
                            rv[child_info[0]] = child_info[1]
                            reducible_remove(child_info[0])
                        if children_stack:
                            # Allow the parent to be fused, but only under strict circumstances.
                            # Ensure that linear chains may still be fused.
                            if width > max_width:
                                width = max_width
                            if fudge > int(ave_width - 1):
                                fudge = int(ave_width - 1)
                            # key, task, height, width, number of nodes, fudge, set of edges
                            # This task *implicitly* depends on `edges`
                            info_stack_append((parent, rv[parent], None if key_renamer is None else [parent],
                                               1, width, 1, fudge, edges))
                        else:
                            break
                # Traverse upwards
                parent = rdeps[parent][0]
    # Optionally rename each fused root key and keep the original key as an
    # alias pointing at the new name.
    if key_renamer is not None:
        for root_key, fused_keys in fused_trees.items():
            alias = key_renamer(fused_keys)
            if alias is not None and alias not in rv:
                rv[alias] = rv[root_key]
                rv[root_key] = alias
                deps[alias] = deps[root_key]
                deps[root_key] = {alias}
    return rv, deps
# Defining `key_split` (used by key renamers in `fuse`) in utils.py
# results in messy circular imports, so define it here instead.
hex_pattern = re.compile('[a-f]+')
def key_split(s):
    """
    >>> key_split('x')
    u'x'
    >>> key_split('x-1')
    u'x'
    >>> key_split('x-1-2-3')
    u'x'
    >>> key_split(('x-2', 1))
    'x'
    >>> key_split("('x-2', 1)")
    u'x'
    >>> key_split('hello-world-1')
    u'hello-world'
    >>> key_split(b'hello-world-1')
    u'hello-world'
    >>> key_split('ae05086432ca935f6eba409a8ecd4896')
    'data'
    >>> key_split('<module.submodule.myclass object at 0xdaf372')
    u'myclass'
    >>> key_split(None)
    'Other'
    >>> key_split('x-abcdefab')  # ignores hex
    u'x'
    """
    if type(s) is bytes:
        s = s.decode()
    if type(s) is tuple:
        s = s[0]
    try:
        pieces = s.split('-')
        head = pieces[0]
        # Strip leading quote/paren characters from stringified tuples.
        result = head if head[0].isalpha() else head.lstrip("'(\"")
        for piece in pieces[1:]:
            # Stop at the first numeric part or 8-char hex token.
            if not piece.isalpha():
                break
            if len(piece) == 8 and hex_pattern.match(piece) is not None:
                break
            result += '-' + piece
        if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
            return 'data'
        if result[0] == '<':
            result = result.strip('<>').split()[0].split('.')[-1]
        return result
    except Exception:
        return 'Other'
|
|
# Copyright 2015 StackHut Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
StackHut interface and modifications to Barrister RPC library
"""
import os
import json
import uuid
import signal
from enum import Enum
import sh
from ..barrister import err_response, ERR_PARSE, ERR_INVALID_REQ, ERR_METHOD_NOT_FOUND, \
ERR_INVALID_PARAMS, ERR_INTERNAL, ERR_UNKNOWN, ERR_INVALID_RESP, \
parse, contract_from_file, RpcException
from ..utils import log
CONTRACTFILE = '.api.json'  # compiled JSON contract generated from the IDL
IDLFILE = 'api.idl'  # Barrister IDL interface definition supplied by the service
REQ_FIFO = '.req.json'  # named pipe used to send requests to the shim process
RESP_FIFO = '.resp.json'  # named pipe used to read responses from the shim process
"""
High-level interface into the IDL file
- based on the JSON compiled output that is parsed into an AST
- used from runtime introspection
"""
class ContactTypes(Enum):
int = 1
string = 2
bool = 3
array = 4
obj = 5
def render_signature(func):
    """Render a human-readable signature for an IDL function.

    Parameters
    ----------
    func : object
        IDL function node with ``name``, ``params`` (each having ``type``,
        ``name``, ``is_array``) and an optional ``returns`` attribute.

    Returns
    -------
    str
        e.g. ``"f(int x) string r"`` or ``"g([]bool b)"`` when there is no
        return value.
    """
    def render_param(p):
        # Array parameters are prefixed with '[]' (Go-style).
        rendered = "{} {}".format(p.type, p.name)
        return '[]' + rendered if p.is_array else rendered

    params_t = ', '.join(render_param(p) for p in func.params)
    if func.returns is not None:
        return "{}({}) {}".format(func.name, params_t, render_param(func.returns))
    # BUG FIX: the original used "{}({}) {}" with only two arguments here,
    # which raised IndexError for any function without a return value.
    return "{}({})".format(func.name, params_t)
def load_contract_file():
    """Load the compiled JSON contract (``CONTRACTFILE``) from disk."""
    return contract_from_file(CONTRACTFILE)
def generate_contract_file():
    """
    Generate the IDL -> JSON Contract file
    main interface into barrister parser
    """
    if not os.path.exists(IDLFILE):
        raise AssertionError("Cannot find 'api.idl' interface definition file")
    # Open both files up-front so a missing/unwritable contract path fails fast.
    with open(IDLFILE, 'r') as idl_file, open(CONTRACTFILE, "w") as contract_file:
        contract_file.write(json.dumps(parse(idl_file, IDLFILE), indent=4))
####################################################################################################
# Error handling
ERR_SERVICE = -32002  # JSON-RPC error code for service-level failures
class ParseError(RpcException):
    """Raised when the request payload cannot be parsed (wraps ERR_PARSE)."""
    def __init__(self, data=None):
        super().__init__(ERR_PARSE, 'Parse Error', data)
class InvalidReqError(RpcException):
    """Raised for structurally invalid JSON-RPC requests (wraps ERR_INVALID_REQ)."""
    def __init__(self, data=None):
        super().__init__(ERR_INVALID_REQ, 'Invalid Request', data)
class MethodNotFoundError(RpcException):
    """Raised when the requested method does not exist (wraps ERR_METHOD_NOT_FOUND)."""
    def __init__(self, data=None):
        super().__init__(ERR_METHOD_NOT_FOUND, 'Method Not Found', data)
class InternalError(RpcException):
    """Raised for unexpected internal failures; ``msg`` is appended to the error text."""
    def __init__(self, msg='', data=None):
        super().__init__(ERR_INTERNAL, 'Internal Error - {}'.format(msg), data)
class ServiceError(RpcException):
    """Raised for service-level failures (uses ERR_SERVICE above)."""
    def __init__(self, msg, data=None):
        super().__init__(ERR_SERVICE, 'Service Error - {}'.format(msg), data)
class CustomError(RpcException):
    """Error with a caller-supplied JSON-RPC error ``code``."""
    def __init__(self, code, msg, data=None):
        super().__init__(code, 'Error - {}'.format(msg), data)
class NonZeroExitError(RpcException):
    """Raised when a sub-command exits non-zero; carries exit code and stderr."""
    def __init__(self, exit_code, stderr):
        data = dict(exit_code=exit_code, stderr=stderr)
        super().__init__(-32001, 'Sub-command returned a non-zero exit', data)
def exc_to_json_error(e, req_id=None):
    """Convert an ``RpcException`` into a JSON-RPC error response object."""
    code, msg, data = e.code, e.msg, e.data
    return err_response(req_id, code, msg, data)
from enum import Enum
class SHCmds(Enum):
    """Control commands sent to the shim process alongside normal RPC calls
    (shutdown on exit; preBatch/postBatch around batched requests)."""
    startup = 1
    shutdown = 2
    preBatch = 3
    postBatch = 4
def add_get_id(d):
    """Ensure the JSON-RPC request dict has an ``id`` and return it.

    A fresh UUID4 string is assigned when the key is missing.
    """
    return d.setdefault('id', str(uuid.uuid4()))
class StackHutRPC:
    """
    Alt. implementation of Barrister.server modified for StackHut needs
    Performs
    * 'Type'-checking of requests and responses per interface def
    * loading the lang-specific shim/client
    * passing messages between the runner and shim/client process
    """
    def __init__(self, backend, shim_cmd):
        self.contract = contract_from_file(CONTRACTFILE)
        self.backend = backend
        # setup fifos used to exchange JSON messages with the shim process
        os.mkfifo(REQ_FIFO)
        os.mkfifo(RESP_FIFO)
        # run the shim in the background, forwarding its output to our log
        cmd = sh.Command(shim_cmd[0])
        self.p = cmd(shim_cmd[1:], _bg=True, _out=lambda x: log.debug("Runner - {}".format(x.rstrip())),
                     _err=lambda x: log.error("Runner - {}".format(x.rstrip())))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Shut down the shim subprocess, force-killing it if it takes
        # longer than 5 seconds to exit.
        def handler(signum, frame):
            log.error("Force-quitting RPC subprocess")
            self.p.kill()
            raise TimeoutError()
        # Set the signal handler and a 5-second alarm
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(5)
        # send shutdown msg to each iface
        for iface in self.contract.interfaces.keys():
            log.debug("Send shutdown to {}".format(iface))
            self._cmd_call('{}.{}'.format(iface, SHCmds.shutdown.name))
        log.debug("Terminating RPC sub-process")
        try:
            self.p.terminate()
            self.p.wait()
        except sh.SignalException_15:
            log.warn("RPC subprocess shutdown uncleanly")
        signal.alarm(0)  # cancel the pending alarm

    def _cmd_call(self, cmd):
        """Send a control command (see ``SHCmds``) to the shim."""
        log.debug('Sending cmd message - {}'.format(cmd))
        resp = self._sub_call(cmd, [], 'shcmd')
        log.debug("Cmd response - {}".format(resp))

    def _req_call(self, req):
        """Make RPC call for a single request, returning the JSON-RPC response dict."""
        req_id = None
        try:
            if type(req) is not dict:
                # BUG FIX: original mixed %-style with str.format
                # ("%s is not an object.".format(req)) so the offending
                # value was never interpolated into the error message.
                raise InvalidReqError(dict(msg="{} is not an object.".format(req)))
            # massage the data (if needed)
            req_id = add_get_id(req)
            if 'jsonrpc' not in req:
                req['jsonrpc'] = "2.0"
            if "method" not in req:
                raise InvalidReqError(dict(msg="No method"))
            # return the idl - TODO - move into Scala
            if req['method'] == "common.barrister-idl" or req['method'] == "getIdl":
                return self.contract.idl_parsed
            # add the default interface if none exists
            if req['method'].find('.') < 0:
                req['method'] = "{}.{}".format('Default', req['method'])
            # NOTE - would setup context and run pre/post filters here in Barrister
            # Ok, - we're good to go
            method = req["method"]
            iface_name, func_name = method.split('.')
            params = req.get('params', [])
            self.contract.validate_request(iface_name, func_name, params)
            result = self._sub_call(method, params, req_id)
            self.contract.validate_response(iface_name, func_name, result)
            resp = dict(jsonrpc="2.0", id=req_id, result=result)
        except RpcException as e:
            resp = exc_to_json_error(e, req_id)
        except Exception as e:
            _e = InternalError('Exception', dict(exception=repr(e)))
            resp = exc_to_json_error(_e, req_id)
        return resp

    def _sub_call(self, method, params, req_id):
        """Actual call to the shim/client subprocess via the FIFO pair.

        Raises an ``RpcException`` subclass when the shim reports an error.
        """
        self.backend.create_request_dir(req_id)
        # create the (sub-)req
        sub_req = dict(method=method, params=params, req_id=req_id)
        # blocking-wait to send the request
        with open(REQ_FIFO, "w") as f:
            f.write(json.dumps(sub_req))
        # blocking-wait to read the resp
        with open(RESP_FIFO, "r") as f:
            sub_resp = json.loads(f.read())
        # check the response
        if 'error' in sub_resp:
            error_code = sub_resp['error']
            log.debug(sub_resp)
            if error_code == ERR_METHOD_NOT_FOUND:
                raise MethodNotFoundError()
            elif error_code == ERR_INTERNAL:
                raise InternalError(sub_resp['msg'], sub_resp['data'])
            else:
                raise CustomError(error_code, sub_resp['msg'], sub_resp['data'])
        self.backend.del_request_dir(req_id)
        # validate and return the response
        return sub_resp['result']

    def call(self, task_req):
        """Make RPC call for given task (a single request dict or a batch list)."""
        # Massage the data
        try:
            req = task_req['request']
            if type(req) is list:
                if len(req) < 1:
                    return exc_to_json_error(InvalidReqError(data=dict(msg="Empty Batch")))
                # find batch interface so pre/post batch hooks can be sent
                iface_name = None
                first_method = req[0].get('method', None)
                if first_method:
                    iface_name = 'Default' if first_method.find('.') < 0 else first_method.split('.')[0]
                if iface_name:
                    self._cmd_call('{}.{}'.format(iface_name, SHCmds.preBatch.name))
                task_resp = [self._req_call(r) for r in req]
                if iface_name:
                    self._cmd_call('{}.{}'.format(iface_name, SHCmds.postBatch.name))
            else:
                task_resp = self._req_call(req)
        except Exception as e:
            task_resp = exc_to_json_error(InternalError(repr(e)))
        return task_resp
|
|
# Natural Language Toolkit: IPI PAN Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Konrad Goluchowski <kodie@mimuw.edu.pl>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import functools
from .util import StreamBackedCorpusView, concat
from .api import CorpusReader
def _parse_args(fun):
@functools.wraps(fun)
def decorator(self, fileids=None, **kwargs):
kwargs.pop('tags', None)
if not fileids:
fileids = self.fileids()
return fun(self, fileids, **kwargs)
return decorator
class IPIPANCorpusReader(CorpusReader):
    """
    Corpus reader designed to work with corpus created by IPI PAN.
    See http://korpus.pl/en/ for more details about IPI PAN corpus.
    The corpus includes information about text domain, channel and categories.
    You can access possible values using ``domains()``, ``channels()`` and
    ``categories()``. You can use also this metadata to filter files, e.g.:
    ``fileids(channel='prasa')``, ``fileids(categories='publicystyczny')``.
    The reader supports methods: words, sents, paras and their tagged versions.
    You can get part of speech instead of full tag by giving "simplify_tags=True"
    parameter, e.g.: ``tagged_sents(simplify_tags=True)``.
    Also you can get all tags disambiguated tags specifying parameter
    "one_tag=False", e.g.: ``tagged_paras(one_tag=False)``.
    You can get all tags that were assigned by a morphological analyzer specifying
    parameter "disamb_only=False", e.g. ``tagged_words(disamb_only=False)``.
    The IPIPAN Corpus contains tags indicating if there is a space between two
    tokens. To add special "no space" markers, you should specify parameter
    "append_no_space=True", e.g. ``tagged_words(append_no_space=True)``.
    As a result in place where there should be no space between two tokens new
    pair ('', 'no-space') will be inserted (for tagged data) and just '' for
    methods without tags.
    The corpus reader can also try to append spaces between words. To enable this
    option, specify parameter "append_space=True", e.g. ``words(append_space=True)``.
    As a result either ' ' or (' ', 'space') will be inserted between tokens.
    By default, xml entities like &quot; and &amp; are replaced by corresponding
    characters. You can turn off this feature, specifying parameter
    "replace_xmlentities=False", e.g. ``words(replace_xmlentities=False)``.
    """

    def __init__(self, root, fileids):
        CorpusReader.__init__(self, root, fileids, None, None)

    def raw(self, fileids=None):
        """Return the concatenated raw contents of the given morph files."""
        if not fileids:
            fileids = self.fileids()
        contents = []
        for fileid in self._list_morph_files(fileids):
            # Use a context manager so file handles are closed promptly
            # (the original leaked open file objects).
            with open(fileid, 'r') as infile:
                contents.append(infile.read())
        return ''.join(contents)

    def channels(self, fileids=None):
        """Return the list of distinct 'channel' header values."""
        if not fileids:
            fileids = self.fileids()
        return self._parse_header(fileids, 'channel')

    def domains(self, fileids=None):
        """Return the list of distinct 'domain' header values."""
        if not fileids:
            fileids = self.fileids()
        return self._parse_header(fileids, 'domain')

    def categories(self, fileids=None):
        """Return the list of distinct categories ('keyTerm' header values)."""
        if not fileids:
            fileids = self.fileids()
        return [self._map_category(cat)
                for cat in self._parse_header(fileids, 'keyTerm')]

    def fileids(self, channels=None, domains=None, categories=None):
        """Return fileids, optionally filtered by exactly one of
        ``channels``, ``domains`` or ``categories``.

        Raises ValueError if more than one filter is supplied.
        """
        # BUG FIX: the original test only raised when *all three* filters
        # were given, contradicting the error message; reject any
        # combination of two or more.
        if sum(arg is not None for arg in (channels, domains, categories)) > 1:
            raise ValueError('You can specify only one of channels, domains '
                             'and categories parameter at once')
        if channels is None and domains is None and \
                categories is None:
            return CorpusReader.fileids(self)
        if isinstance(channels, basestring):
            channels = [channels]
        if isinstance(domains, basestring):
            domains = [domains]
        if isinstance(categories, basestring):
            categories = [categories]
        if channels:
            return self._list_morph_files_by('channel', channels)
        elif domains:
            return self._list_morph_files_by('domain', domains)
        else:
            return self._list_morph_files_by('keyTerm', categories,
                                             map=self._map_category)

    @_parse_args
    def sents(self, fileids=None, **kwargs):
        """Return sentences as lists of untagged tokens."""
        return concat([self._view(fileid,
                                  mode=IPIPANCorpusView.SENTS_MODE, tags=False, **kwargs)
                       for fileid in self._list_morph_files(fileids)])

    @_parse_args
    def paras(self, fileids=None, **kwargs):
        """Return paragraphs as lists of sentences of untagged tokens."""
        return concat([self._view(fileid,
                                  mode=IPIPANCorpusView.PARAS_MODE, tags=False, **kwargs)
                       for fileid in self._list_morph_files(fileids)])

    @_parse_args
    def words(self, fileids=None, **kwargs):
        """Return a flat sequence of untagged tokens."""
        return concat([self._view(fileid, tags=False, **kwargs)
                       for fileid in self._list_morph_files(fileids)])

    @_parse_args
    def tagged_sents(self, fileids=None, **kwargs):
        """Return sentences as lists of (token, tag) pairs."""
        return concat([self._view(fileid, mode=IPIPANCorpusView.SENTS_MODE,
                                  **kwargs)
                       for fileid in self._list_morph_files(fileids)])

    @_parse_args
    def tagged_paras(self, fileids=None, **kwargs):
        """Return paragraphs as lists of sentences of (token, tag) pairs."""
        return concat([self._view(fileid, mode=IPIPANCorpusView.PARAS_MODE,
                                  **kwargs)
                       for fileid in self._list_morph_files(fileids)])

    @_parse_args
    def tagged_words(self, fileids=None, **kwargs):
        """Return a flat sequence of (token, tag) pairs."""
        return concat([self._view(fileid, **kwargs)
                       for fileid in self._list_morph_files(fileids)])

    def _list_morph_files(self, fileids):
        # Absolute paths of the morph.xml files.
        return [f for f in self.abspaths(fileids)]

    def _list_header_files(self, fileids):
        # Each morph.xml file has a sibling header.xml with metadata.
        return [f.replace('morph.xml', 'header.xml')
                for f in self._list_morph_files(fileids)]

    def _parse_header(self, fileids, tag):
        """Collect the distinct values of ``tag`` across all header files."""
        values = set()
        for f in self._list_header_files(fileids):
            values.update(self._get_tag(f, tag))
        return list(values)

    def _list_morph_files_by(self, tag, values, map=None):
        """Return fileids whose header ``tag`` value (optionally passed
        through ``map``) is in ``values``.  ``map`` keeps its original
        (builtin-shadowing) name for backward compatibility."""
        fileids = self.fileids()
        ret_fileids = set()
        for f in fileids:
            fp = self.abspath(f).replace('morph.xml', 'header.xml')
            values_list = self._get_tag(fp, tag)
            for value in values_list:
                if map is not None:
                    value = map(value)
                if value in values:
                    ret_fileids.add(f)
        return list(ret_fileids)

    def _get_tag(self, f, tag):
        """Extract all text contents of ``<tag>...</tag>`` elements in file ``f``."""
        tags = []
        # Close the header file deterministically (the original leaked it).
        with open(f, 'r') as infile:
            header = infile.read()
        tag_end = 0
        while True:
            tag_pos = header.find('<' + tag, tag_end)
            if tag_pos < 0:
                return tags
            tag_end = header.find('</' + tag + '>', tag_pos)
            tags.append(header[tag_pos + len(tag) + 2:tag_end])

    def _map_category(self, cat):
        # Category values may be prefixed up to a '>' separator; keep the tail.
        pos = cat.find('>')
        if pos == -1:
            return cat
        else:
            return cat[pos + 1:]

    def _view(self, filename, **kwargs):
        """Build an ``IPIPANCorpusView`` after validating keyword arguments."""
        tags = kwargs.pop('tags', True)
        mode = kwargs.pop('mode', 0)
        simplify_tags = kwargs.pop('simplify_tags', False)
        one_tag = kwargs.pop('one_tag', True)
        disamb_only = kwargs.pop('disamb_only', True)
        append_no_space = kwargs.pop('append_no_space', False)
        append_space = kwargs.pop('append_space', False)
        replace_xmlentities = kwargs.pop('replace_xmlentities', True)
        if len(kwargs) > 0:
            raise ValueError('Unexpected arguments: %s' % kwargs.keys())
        if not one_tag and not disamb_only:
            raise ValueError('You cannot specify both one_tag=False and '
                             'disamb_only=False')
        if not tags and (simplify_tags or not one_tag or not disamb_only):
            raise ValueError('You cannot specify simplify_tags, one_tag or '
                             'disamb_only with functions other than tagged_*')
        return IPIPANCorpusView(filename,
                                tags=tags, mode=mode, simplify_tags=simplify_tags,
                                one_tag=one_tag, disamb_only=disamb_only,
                                append_no_space=append_no_space,
                                append_space=append_space,
                                replace_xmlentities=replace_xmlentities
                                )
class IPIPANCorpusView(StreamBackedCorpusView):
WORDS_MODE = 0
SENTS_MODE = 1
PARAS_MODE = 2
def __init__(self, filename, startpos=0, **kwargs):
StreamBackedCorpusView.__init__(self, filename, None, startpos, None)
self.in_sentence = False
self.position = 0
self.show_tags = kwargs.pop('tags', True)
self.disamb_only = kwargs.pop('disamb_only', True)
self.mode = kwargs.pop('mode', IPIPANCorpusView.WORDS_MODE)
self.simplify_tags = kwargs.pop('simplify_tags', False)
self.one_tag = kwargs.pop('one_tag', True)
self.append_no_space = kwargs.pop('append_no_space', False)
self.append_space = kwargs.pop('append_space', False)
self.replace_xmlentities = kwargs.pop('replace_xmlentities', True)
def read_block(self, stream):
sentence = []
sentences = []
space = False
no_space = False
tags = set()
lines = self._read_data(stream)
while True:
# we may have only part of last line
if len(lines) <= 1:
self._seek(stream)
lines = self._read_data(stream)
if lines == ['']:
assert not sentences
return []
line = lines.pop()
self.position += len(line) + 1
if line.startswith('<chunk type="s"'):
self.in_sentence = True
elif line.startswith('<chunk type="p"'):
pass
elif line.startswith('<tok'):
if self.append_space and space and not no_space:
self._append_space(sentence)
space = True
no_space = False
orth = ""
tags = set()
elif line.startswith('</chunk'):
if self.in_sentence:
self.in_sentence = False
self._seek(stream)
if self.mode == self.SENTS_MODE:
return [sentence]
elif self.mode == self.WORDS_MODE:
if self.append_space:
self._append_space(sentence)
return sentence
else:
sentences.append(sentence)
elif self.mode == self.PARAS_MODE:
self._seek(stream)
return [sentences]
elif line.startswith('<orth'):
orth = line[6:-7]
if self.replace_xmlentities:
orth = orth.replace('"', '"').replace('&', '&')
elif line.startswith('<lex'):
if not self.disamb_only or line.find('disamb=') != -1:
tag = line[line.index('<ctag')+6 : line.index('</ctag') ]
tags.add(tag)
elif line.startswith('</tok'):
if self.show_tags:
if self.simplify_tags:
tags = [t.split(':')[0] for t in tags]
if not self.one_tag or not self.disamb_only:
sentence.append((orth, tuple(tags)))
else:
sentence.append((orth, tags.pop()))
else:
sentence.append(orth)
elif line.startswith('<ns/>'):
if self.append_space:
no_space = True
if self.append_no_space:
if self.show_tags:
sentence.append(('', 'no-space'))
else:
sentence.append('')
elif line.startswith('</cesAna'):
pass
def _read_data(self, stream):
    """Remember the current stream offset, then return a 4 KiB chunk
    split into lines in reverse order (so callers can pop() lines in
    file order)."""
    self.position = stream.tell()
    return stream.read(4096).split('\n')[::-1]
def _seek(self, stream):
    # Rewind to the last fully-consumed offset so buffered-but-unprocessed
    # input is re-read by the next _read_data() call.
    stream.seek(self.position)
def _append_space(self, sentence):
    """Append a token separator; tagged output gets a (' ', 'space') pair."""
    separator = (' ', 'space') if self.show_tags else ' '
    sentence.append(separator)
|
|
import sendgrid
import os


def _show(response):
    """Print status code, body and headers of a SendGrid API response."""
    print(response.status_code)
    print(response.body)
    print(response.headers)


sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))

##################################################
# Retrieve all mail settings #
# GET /mail_settings #

params = {'limit': 1, 'offset': 1}
_show(sg.client.mail_settings.get(query_params=params))

##################################################
# Update address whitelist mail settings #
# PATCH /mail_settings/address_whitelist #

data = {
  "enabled": True,
  "list": [
    "email1@example.com",
    "example.com"
  ]
}
_show(sg.client.mail_settings.address_whitelist.patch(request_body=data))

##################################################
# Retrieve address whitelist mail settings #
# GET /mail_settings/address_whitelist #

_show(sg.client.mail_settings.address_whitelist.get())

##################################################
# Update BCC mail settings #
# PATCH /mail_settings/bcc #

data = {
  "email": "email@example.com",
  "enabled": False
}
_show(sg.client.mail_settings.bcc.patch(request_body=data))

##################################################
# Retrieve all BCC mail settings #
# GET /mail_settings/bcc #

_show(sg.client.mail_settings.bcc.get())

##################################################
# Update bounce purge mail settings #
# PATCH /mail_settings/bounce_purge #

data = {
  "enabled": True,
  "hard_bounces": 5,
  "soft_bounces": 5
}
_show(sg.client.mail_settings.bounce_purge.patch(request_body=data))

##################################################
# Retrieve bounce purge mail settings #
# GET /mail_settings/bounce_purge #

_show(sg.client.mail_settings.bounce_purge.get())

##################################################
# Update footer mail settings #
# PATCH /mail_settings/footer #

data = {
  "enabled": True,
  "html_content": "...",
  "plain_content": "..."
}
_show(sg.client.mail_settings.footer.patch(request_body=data))

##################################################
# Retrieve footer mail settings #
# GET /mail_settings/footer #

_show(sg.client.mail_settings.footer.get())

##################################################
# Update forward bounce mail settings #
# PATCH /mail_settings/forward_bounce #

data = {
  "email": "example@example.com",
  "enabled": True
}
_show(sg.client.mail_settings.forward_bounce.patch(request_body=data))

##################################################
# Retrieve forward bounce mail settings #
# GET /mail_settings/forward_bounce #

_show(sg.client.mail_settings.forward_bounce.get())

##################################################
# Update forward spam mail settings #
# PATCH /mail_settings/forward_spam #

data = {
  "email": "",
  "enabled": False
}
_show(sg.client.mail_settings.forward_spam.patch(request_body=data))

##################################################
# Retrieve forward spam mail settings #
# GET /mail_settings/forward_spam #

_show(sg.client.mail_settings.forward_spam.get())

##################################################
# Update plain content mail settings #
# PATCH /mail_settings/plain_content #

data = {
  "enabled": False
}
_show(sg.client.mail_settings.plain_content.patch(request_body=data))

##################################################
# Retrieve plain content mail settings #
# GET /mail_settings/plain_content #

_show(sg.client.mail_settings.plain_content.get())

##################################################
# Update spam check mail settings #
# PATCH /mail_settings/spam_check #

data = {
  "enabled": True,
  "max_score": 5,
  "url": "url"
}
_show(sg.client.mail_settings.spam_check.patch(request_body=data))

##################################################
# Retrieve spam check mail settings #
# GET /mail_settings/spam_check #

_show(sg.client.mail_settings.spam_check.get())

##################################################
# Update template mail settings #
# PATCH /mail_settings/template #

data = {
  "enabled": True,
  "html_content": "<% body %>"
}
_show(sg.client.mail_settings.template.patch(request_body=data))

##################################################
# Retrieve legacy template mail settings #
# GET /mail_settings/template #

_show(sg.client.mail_settings.template.get())
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_grapper
----------------------------------
Tests for `grapper` module.
"""
import unittest
from grapper import grapper
import os
import json
import multiprocessing
OUTPUT_FILE = "test_output.json"
class TestGrapper(unittest.TestCase):
    """Unit and end-to-end tests for the grapper coordinate remapper."""
    def setUp(self):
        # Start every test without a stale output file.
        try:
            os.remove(OUTPUT_FILE)
        except OSError:
            pass
    def tearDown(self):
        # Best-effort cleanup; some tests already remove the file themselves.
        try:
            os.remove(OUTPUT_FILE)
        except OSError:
            pass
    def test_file_writer(self):
        """Given a writer queue,
        When I send JSON strings to the queue,
        And I send the queue to the file writer
        Then a valid JSON list will be written
        to the output file specified"""
        writer_queue = multiprocessing.Queue()
        writer_queue.put('1')
        writer_queue.put('"1"')
        writer_queue.put('{"1": 1 }')
        # The stop token tells file_writer to close the JSON list and return.
        writer_queue.put(grapper.STOP_TOKEN)
        grapper.file_writer(OUTPUT_FILE, writer_queue, grapper.STOP_TOKEN)
        with open(OUTPUT_FILE, 'r') as output:
            jsondata = json.load(output)
            dict_list = [coord for coord in jsondata]
            self.assertEqual(dict_list, [1, "1", {"1": 1}])
    def test_remap_genome_coordinate(self):
        """Given a valid coordinate on the old reference genome,
        And a dictionary of mappings
        When I call remap_genome_coordinates
        Then I get back the new coordinate"""
        coordinate = {"chromosome": "1", "position": 150, "reference": "A"}
        # Alignment tuples: (length, src_chrom, src_start, tgt_start, tgt_chrom).
        align_tuples = [
            (100,"1",100,300,"2"),
            (300,"2",200,20,"7")
        ]
        new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])
        self.assertEqual(
            new_mapping, {
                "chromosome": "2", "position": 350, "reference": "A"})
    def test_remap_position_outside_expected_range(self):
        """Given a position coordinate outside of the allowed bounds for the chromosome
        And a dictionary of mappings
        When I call remap_genome_coordinates
        Then I get back None"""
        # Position below the mapped fragment's start.
        coordinate = {"chromosome": "1", "position": 35, "reference": "A"}
        align_tuples = [
            (100,"1",100,300,"2"),
            (300,"2",200,20,"7")
        ]
        new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])
        self.assertEqual(new_mapping, None)
        # Position just past the fragment's end (start 100 + length 100).
        coordinate = {"chromosome": "1", "position": 201, "reference": "A"}
        align_tuples = [
            (100,"1",100,300,"2"),
            (300,"2",200,20,"7")
        ]
        new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])
        self.assertEqual(new_mapping, None)
    def test_chromosome_not_mapped(self):
        """Given an input coordinate for which the chromosome is not mapped
        And a dictionary of mappings
        When I call remap_genome_coordinates
        Then I get back None"""
        coordinate = {"chromosome": "12", "position": 150, "reference": "A"}
        align_tuples = [
            (100,"1",100,300,"2"),
            (300,"2",200,20,"7")
        ]
        new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])
        self.assertEqual(new_mapping, None)
    def test_two_alignments_from_same_chromosome(self):
        """Given 2 different fragments are in
        the alignment file tests/test_data/alignment2x.json
        from the same chromosome
        and 2 different fragments are also in the
        coordinates file tests/test_data/source_coordinates2x.json
        from the same chromosome
        When I run the program
        Then I expect the JSON output file to contain target coordinates:
        [{ "chromosome": "2", "position": 350, "reference": "A" },
         { "chromosome": "7", "position": 20, "reference": "C" }
        ]
        """
        try:
            os.remove(OUTPUT_FILE)
        except OSError:
            pass
        alignfile = "tests/test_data/alignment2x.json"
        coordsfile = "tests/test_data/source_coordinates2x.json"
        grapper.handle_command(alignfile, coordsfile, OUTPUT_FILE)
        # Wait for file to be fully flushed to the disk
        with open(OUTPUT_FILE, 'r') as output:
            target_coords = json.load(output)
            dict_list = [coord for coord in target_coords]
            self.assertEqual(dict_list, [{"chromosome": "2",
                                          "position": 350,
                                          "reference": "A"},
                                         {"chromosome": "7",
                                          "position": 20,
                                          "reference": "C"}])
        os.remove(OUTPUT_FILE)
    def test_handle_command(self):
        """Given the JSON alignment file
        in tests/test_data/alignment.json with contents
        [{ "length": 100,
           "source": { "chromosome": "1", "start": 100 },
           "target": { "chromosome": "2", "start": 300 } },
         { "length": 200,
           "source": { "chromosome": "2", "start": 300 },
           "target": { "chromosome": "7", "start":
           20 } }
        ]
        And the JSON source coordinate file in
        tests/test_data/source_coordinates.json with contents
        [{ "chromosome": "1", "position": 150, "reference": "A" },
         { "chromosome": "2", "position": 300, "reference": "C" }
        ]
        When I run the program
        Then I expect the JSON output file to contain target coordinates:
        [{ "chromosome": "2", "position": 350, "reference": "A" },
         { "chromosome": "7", "position": 20, "reference": "C" }
        ]
        """
        try:
            os.remove(OUTPUT_FILE)
        except OSError:
            pass
        alignfile = "tests/test_data/alignment.json"
        coordsfile = "tests/test_data/source_coordinates.json"
        grapper.handle_command(alignfile, coordsfile, OUTPUT_FILE)
        # Wait for file to be fully flushed to the disk
        with open(OUTPUT_FILE, 'r') as output:
            target_coords = json.load(output)
            dict_list = [coord for coord in target_coords]
            self.assertEqual(dict_list, [{"chromosome": "2",
                                          "position": 350,
                                          "reference": "A"},
                                         {"chromosome": "7",
                                          "position": 20,
                                          "reference": "C"}])
        os.remove(OUTPUT_FILE)
    def test_handle_outside_range_or_unmapped(self):
        """Given the JSON alignment file in
        tests/test_data/alignment.json with contents
        [{ "length": 100,
           "source": { "chromosome": "1", "start": 100 },
           "target": { "chromosome": "2", "start": 300 } },
         { "length": 200, "source": { "chromosome": "2", "start": 300 },
           "target": { "chromosome": "7", "start": 20 } }
        ]
        And the JSON source coordinate file in
        tests/test_data/source_coordinates_with_invalid.json with contents
        [{ "chromosome": "1", "position": 150, "reference": "A" },
         { "chromosome": "2", "position": 300, "reference": "C" },
         { "chromosome": "1", "position": 35, "reference": "A" },
         { "chromosome": "1", "position": 201, "reference": "A" },
         { "chromosome": "12", "position": 150, "reference": "A" }
        ]
        When I run the program
        Then I expect the JSON output file to contain target coordinates:
        [{ "chromosome": "2", "position": 350, "reference": "A" },
         { "chromosome": "7", "position": 20, "reference": "C" }
        ]
        """
        alignfile = "tests/test_data/alignment.json"
        coordsfile = "tests/test_data/source_coordinates_with_invalid.json"
        grapper.handle_command(alignfile, coordsfile, OUTPUT_FILE)
        # Wait for file to be fully flushed to the disk
        with open(OUTPUT_FILE, 'r') as output:
            target_coords = json.load(output)
            dict_list = [coord for coord in target_coords]
            self.assertEqual(dict_list, [{"chromosome": "2",
                                          "position": 350,
                                          "reference": "A"},
                                         {"chromosome": "7",
                                          "position": 20,
                                          "reference": "C"}])
if __name__ == '__main__':
    import sys
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script manages the installed toolchains in the chroot.
"""
import copy
import glob
import json
import os
from chromite.cbuildbot import constants
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import toolchain
# Needs to be after chromite imports.
import lddtree
if cros_build_lib.IsInsideChroot():
# Only import portage after we've checked that we're inside the chroot.
# Outside may not have portage, in which case the above may not happen.
# We'll check in main() if the operation needs portage.
# pylint: disable=F0401
import portage
EMERGE_CMD = os.path.join(constants.CHROMITE_BIN_DIR, 'parallel_emerge')
# Sentinel "version" strings used in the version maps below.
PACKAGE_STABLE = '[stable]'
PACKAGE_NONE = '[none]'
SRC_ROOT = os.path.realpath(constants.SOURCE_ROOT)
# Portage overlay locations inside the chroot.
CHROMIUMOS_OVERLAY = '/usr/local/portage/chromiumos'
STABLE_OVERLAY = '/usr/local/portage/stable'
CROSSDEV_OVERLAY = '/usr/local/portage/crossdev'
# TODO: The versions are stored here very much like in setup_board.
# The goal for future is to differentiate these using a config file.
# This is done essentially by messing with GetDesiredPackageVersions()
DEFAULT_VERSION = PACKAGE_STABLE
# Baseline package -> version map applied to every target.
DEFAULT_TARGET_VERSION_MAP = {
}
# Per-target overrides; e.g. the host target never gets a cross gdb.
TARGET_VERSION_MAP = {
  'host' : {
    'gdb' : PACKAGE_NONE,
  },
}
# Overrides for {gcc,binutils}-config, pick a package with particular suffix.
CONFIG_TARGET_SUFFIXES = {
  'binutils' : {
    'i686-pc-linux-gnu' : '-gold',
    'x86_64-cros-linux-gnu' : '-gold',
  },
}
# Global per-run cache that will be filled ondemand in by GetPackageMap()
# function as needed.
target_version_map = {
}
class Crossdev(object):
  """Class for interacting with crossdev and caching its output."""

  # On-disk JSON cache of per-target crossdev configuration, keyed by the
  # crossdev version that produced it (stale caches are discarded in Load).
  _CACHE_FILE = os.path.join(CROSSDEV_OVERLAY, '.configured.json')
  _CACHE = {}

  @classmethod
  def Load(cls, reconfig):
    """Load crossdev cache from disk.

    The cached data is only trusted when it was written by the currently
    installed crossdev version; otherwise (or when |reconfig| is set) we
    start with a fresh cache.
    """
    crossdev_version = GetStablePackageVersion('sys-devel/crossdev', True)
    cls._CACHE = {'crossdev_version': crossdev_version}
    if os.path.exists(cls._CACHE_FILE) and not reconfig:
      with open(cls._CACHE_FILE) as f:
        data = json.load(f)
        if crossdev_version == data.get('crossdev_version'):
          cls._CACHE = data

  @classmethod
  def Save(cls):
    """Store crossdev cache on disk."""
    # Save the cache from the successful run.
    with open(cls._CACHE_FILE, 'w') as f:
      json.dump(cls._CACHE, f)

  @classmethod
  def GetConfig(cls, target):
    """Returns a map of crossdev provided variables about a tuple."""
    CACHE_ATTR = '_target_tuple_map'
    val = cls._CACHE.setdefault(CACHE_ATTR, {})
    if not target in val:
      # Find out the crossdev tuple.
      target_tuple = target
      if target == 'host':
        target_tuple = toolchain.GetHostTuple()
      # Catch output of crossdev.
      out = cros_build_lib.RunCommand(['crossdev', '--show-target-cfg',
                                       '--ex-gdb', target_tuple],
                                      print_cmd=False,
                                      redirect_stdout=True).output.splitlines()
      # List of tuples split at the first '=', converted into dict.
      val[target] = dict([x.split('=', 1) for x in out])
    return val[target]

  @classmethod
  def UpdateTargets(cls, targets, usepkg, config_only=False):
    """Calls crossdev to initialize a cross target.

    Args:
      targets: The list of targets to initialize using crossdev.
      usepkg: Copies the commandline opts (prefer binary packages).
      config_only: Just update the config, skipping already-configured
        targets; no packages are built.
    """
    configured_targets = cls._CACHE.setdefault('configured_targets', [])
    cmdbase = ['crossdev', '--show-fail-log']
    cmdbase.extend(['--env', 'FEATURES=splitdebug'])
    # Pick stable by default, and override as necessary.
    cmdbase.extend(['-P', '--oneshot'])
    if usepkg:
      cmdbase.extend(['-P', '--getbinpkg',
                      '-P', '--usepkgonly',
                      '--without-headers'])
    overlays = '%s %s' % (CHROMIUMOS_OVERLAY, STABLE_OVERLAY)
    cmdbase.extend(['--overlays', overlays])
    cmdbase.extend(['--ov-output', CROSSDEV_OVERLAY])
    for target in targets:
      if config_only and target in configured_targets:
        continue
      cmd = cmdbase + ['-t', target]
      for pkg in GetTargetPackages(target):
        if pkg == 'gdb':
          # Gdb does not have selectable versions.
          cmd.append('--ex-gdb')
          continue
        # The first of the desired versions is the "primary" one.
        version = GetDesiredPackageVersions(target, pkg)[0]
        cmd.extend(['--%s' % pkg, version])
      # Append any extra per-target crossdev flags from the toolchain config.
      cmd.extend(targets[target]['crossdev'].split())
      if config_only:
        # In this case we want to just quietly reinit
        cmd.append('--init-target')
        cros_build_lib.RunCommand(cmd, print_cmd=False, redirect_stdout=True)
      else:
        cros_build_lib.RunCommand(cmd)
      configured_targets.append(target)
def GetPackageMap(target):
  """Return the package -> desired-version map for |target|.

  The underlying data (DEFAULT_TARGET_VERSION_MAP / TARGET_VERSION_MAP) is
  static; results are memoized in the module-level target_version_map cache
  purely to avoid rebuilding the merged dict.

  Args:
    target: the target for which to return a version map

  Returns:
    a map between packages and desired versions in internal format
    (using the PACKAGE_* constants)
  """
  cached = target_version_map.get(target)
  if cached is not None:
    return cached

  # Merge precedence: per-target overrides beat already-set defaults, which
  # beat the global DEFAULT_VERSION fallback.
  overrides = TARGET_VERSION_MAP.get(target, {})
  result = copy.copy(DEFAULT_TARGET_VERSION_MAP)
  for pkg in GetTargetPackages(target):
    result[pkg] = overrides.get(pkg, result.get(pkg, DEFAULT_VERSION))
  target_version_map[target] = result
  return result
def GetTargetPackages(target):
  """Returns a list of packages for a given target."""
  conf = Crossdev.GetConfig(target)
  # A package is wanted iff its ${pkg}_pn variable is non-empty.
  candidates = conf['crosspkgs'].strip("'").split()
  return [pkg for pkg in candidates if conf[pkg + '_pn']]
# Portage helper functions:
def GetPortagePackage(target, package):
"""Returns a package name for the given target."""
conf = Crossdev.GetConfig(target)
# Portage category:
if target == 'host':
category = conf[package + '_category']
else:
category = conf['category']
# Portage package:
pn = conf[package + '_pn']
# Final package name:
assert(category)
assert(pn)
return '%s/%s' % (category, pn)
def IsPackageDisabled(target, package):
  """Returns if the given package is not used for the target."""
  return [PACKAGE_NONE] == GetDesiredPackageVersions(target, package)
def GetInstalledPackageVersions(atom):
  """Return the currently installed versions matching |atom|.

  Args:
    atom: the atom to operate on (e.g. sys-devel/gcc)

  Returns:
    the list of versions of the package currently installed.
  """
  # pylint: disable=E1101
  matches = portage.db['/']['vartree'].dbapi.match(atom, use_cache=0)
  return [portage.versions.cpv_getversion(cpv) for cpv in matches]
def GetStablePackageVersion(atom, installed):
  """Extracts the current stable version for a given package.

  Args:
    atom: the portage atom to operate on (e.g. sys-devel/gcc)
    installed: Whether we want installed packages or ebuilds

  Returns:
    a string containing the latest version, or None when nothing matches.
  """
  # vartree = installed packages; porttree = available ebuilds.
  pkgtype = 'vartree' if installed else 'porttree'
  # pylint: disable=E1101
  cpv = portage.best(portage.db['/'][pkgtype].dbapi.match(atom, use_cache=0))
  return portage.versions.cpv_getversion(cpv) if cpv else None
def VersionListToNumeric(target, package, versions, installed):
  """Resolves keywords in a given version list for a particular package.

  Resolving means replacing PACKAGE_STABLE with the actual number.

  Args:
    target, package: the target/package to operate on
      (e.g. i686-pc-linux-gnu,gcc)
    versions: list of versions to resolve
    installed: whether to match installed packages (vartree) or ebuilds

  Returns:
    list of purely numeric versions equivalent to argument
    (PACKAGE_NONE entries are dropped).
  """
  resolved = []
  atom = GetPortagePackage(target, package)
  for version in versions:
    if version == PACKAGE_STABLE:
      resolved.append(GetStablePackageVersion(atom, installed))
    elif version != PACKAGE_NONE:
      resolved.append(version)
  return resolved
def GetDesiredPackageVersions(target, package):
  """Produces the list of desired versions for each target, package pair.

  The first version in the list is implicitly treated as primary, ie.
  the version that will be initialized by crossdev and selected.

  If the version is PACKAGE_STABLE, it really means the current version which
  is emerged by using the package atom with no particular version key.
  Since crossdev unmasks all packages by default, this will actually
  mean 'unstable' in most cases.

  Args:
    target, package: the target/package to operate on
      (e.g. i686-pc-linux-gnu,gcc)

  Returns:
    a list composed of either a version string, PACKAGE_STABLE
  """
  packagemap = GetPackageMap(target)
  if package not in packagemap:
    return []
  return [packagemap[package]]
def TargetIsInitialized(target):
  """Verifies if the given list of targets has been correctly initialized.

  This determines whether we have to call crossdev while emerging
  toolchain packages or can do it using emerge. Emerge is naturally
  preferred, because all packages can be updated in a single pass.

  Args:
    target: the cross target which is checked

  Returns:
    True if target is completely initialized, False otherwise
  """
  # Check if packages for the given target all have a proper version.
  try:
    for package in GetTargetPackages(target):
      atom = GetPortagePackage(target, package)
      # Do we even want this package && is it initialized?
      # "Initialized" means both an installed version (vartree) and an
      # available ebuild (porttree) can be resolved for the atom.
      if not IsPackageDisabled(target, package) and not (
          GetStablePackageVersion(atom, True) and
          GetStablePackageVersion(atom, False)):
        return False
    return True
  except cros_build_lib.RunCommandError:
    # Fails - The target has likely never been initialized before.
    return False
def RemovePackageMask(target):
  """Delete any stale package.mask file for the given platform.

  The pre-existing package.mask files can mess with the keywords.

  Args:
    target: the target for which to remove the file
  """
  mask_path = os.path.join('/etc/portage/package.mask', 'cross-' + target)
  osutils.SafeUnlink(mask_path)
# Main functions performing the actual update steps.
def RebuildLibtool():
  """Rebuild libtool as needed

  Libtool hardcodes full paths to internal gcc files, so whenever we upgrade
  gcc, libtool will break. We can't use binary packages either as those will
  most likely be compiled against the previous version of gcc.
  """
  needs_update = False
  with open('/usr/bin/libtool') as f:
    for line in f:
      # Look for a line like:
      #   sys_lib_search_path_spec="..."
      # It'll be a list of paths and gcc will be one of them.
      if line.startswith('sys_lib_search_path_spec='):
        line = line.rstrip()
        for path in line.split('=', 1)[1].strip('"').split():
          # A missing path means libtool still points at the old gcc.
          if not os.path.exists(path):
            print 'Rebuilding libtool after gcc upgrade'
            print ' %s' % line
            print ' missing path: %s' % path
            needs_update = True
            break
      if needs_update:
        break
  if needs_update:
    cmd = [EMERGE_CMD, '--oneshot', 'sys-devel/libtool']
    cros_build_lib.RunCommand(cmd)
def UpdateTargets(targets, usepkg):
  """Determines which packages need update/unmerge and defers to portage.

  Args:
    targets: the list of targets to update
    usepkg: copies the commandline option (prefer binary packages)

  Returns:
    True when packages were updated, False when there was nothing to do.
  """
  # Remove keyword files created by old versions of cros_setup_toolchains.
  osutils.SafeUnlink('/etc/portage/package.keywords/cross-host')
  # For each target, we do two things. Figure out the list of updates,
  # and figure out the appropriate keywords/masks. Crossdev will initialize
  # these, but they need to be regenerated on every update.
  print 'Determining required toolchain updates...'
  mergemap = {}
  for target in targets:
    # Record the highest needed version for each target, for masking purposes.
    RemovePackageMask(target)
    for package in GetTargetPackages(target):
      # Portage name for the package
      if IsPackageDisabled(target, package):
        continue
      pkg = GetPortagePackage(target, package)
      current = GetInstalledPackageVersions(pkg)
      desired = GetDesiredPackageVersions(target, package)
      desired_num = VersionListToNumeric(target, package, desired, False)
      # Only versions not yet installed need to be merged.
      mergemap[pkg] = set(desired_num).difference(current)
  packages = []
  for pkg in mergemap:
    for ver in mergemap[pkg]:
      if ver != PACKAGE_NONE:
        packages.append(pkg)
  if not packages:
    print 'Nothing to update!'
    return False
  print 'Updating packages:'
  print packages
  cmd = [EMERGE_CMD, '--oneshot', '--update']
  if usepkg:
    cmd.extend(['--getbinpkg', '--usepkgonly'])
  cmd.extend(packages)
  cros_build_lib.RunCommand(cmd)
  return True
def CleanTargets(targets):
  """Unmerges old packages that are assumed unnecessary."""
  unmergemap = {}
  for target in targets:
    for package in GetTargetPackages(target):
      if IsPackageDisabled(target, package):
        continue
      pkg = GetPortagePackage(target, package)
      current = GetInstalledPackageVersions(pkg)
      desired = GetDesiredPackageVersions(target, package)
      desired_num = VersionListToNumeric(target, package, desired, True)
      # If a desired version is not installed, something was held back and
      # cleaning could remove a version that is still needed — bail out.
      if not set(desired_num).issubset(current):
        print 'Some packages have been held back, skipping clean!'
        return
      unmergemap[pkg] = set(current).difference(desired_num)
  # Cleaning doesn't care about consistency and rebuilding package.* files.
  packages = []
  for pkg, vers in unmergemap.iteritems():
    # '9999' is the live (cros-workon) ebuild; never unmerge it.
    packages.extend('=%s-%s' % (pkg, ver) for ver in vers if ver != '9999')
  if packages:
    print 'Cleaning packages:'
    print packages
    cmd = [EMERGE_CMD, '--unmerge']
    cmd.extend(packages)
    cros_build_lib.RunCommand(cmd)
  else:
    print 'Nothing to clean!'
def SelectActiveToolchains(targets, suffixes):
  """Runs gcc-config and binutils-config to select the desired.

  Args:
    targets: the targets to select
    suffixes: map of package -> target -> profile suffix overrides
      (see CONFIG_TARGET_SUFFIXES)
  """
  for package in ['gcc', 'binutils']:
    for target in targets:
      # Pick the first version in the numbered list as the selected one.
      desired = GetDesiredPackageVersions(target, package)
      desired_num = VersionListToNumeric(target, package, desired, True)
      desired = desired_num[0]
      # *-config does not play revisions, strip them, keep just PV.
      desired = portage.versions.pkgsplit('%s-%s' % (package, desired))[1]
      if target == 'host':
        # *-config is the only tool treating host identically (by tuple).
        target = toolchain.GetHostTuple()
      # And finally, attach target to it.
      desired = '%s-%s' % (target, desired)
      # Target specific hacks
      if package in suffixes:
        if target in suffixes[package]:
          desired += suffixes[package][target]
      extra_env = {'CHOST': target}
      cmd = ['%s-config' % package, '-c', target]
      current = cros_build_lib.RunCommand(cmd, print_cmd=False,
          redirect_stdout=True, extra_env=extra_env).output.splitlines()[0]
      # Do not gcc-config when the current is live or nothing needs to be done.
      if current != desired and current != '9999':
        cmd = [ package + '-config', desired ]
        cros_build_lib.RunCommand(cmd, print_cmd=False)
def ExpandTargets(targets_wanted):
  """Expand any possible toolchain aliases into full targets

  This will expand 'all' and 'sdk' into the respective toolchain tuples.

  Args:
    targets_wanted: The targets specified by the user.

  Returns:
    Full list of tuples with pseudo targets removed.

  Raises:
    ValueError: if a requested target does not exist.
  """
  alltargets = toolchain.GetAllTargets()
  targets_wanted = set(targets_wanted)
  if targets_wanted == set(['all']):
    targets = alltargets
  elif targets_wanted == set(['sdk']):
    # Filter out all the non-sdk toolchains as we don't want to mess
    # with those in all of our builds.
    targets = toolchain.FilterToolchains(alltargets, 'sdk', True)
  else:
    # Verify user input.
    nonexistent = targets_wanted.difference(alltargets)
    if nonexistent:
      # Format the message eagerly. The old code passed the joined list as a
      # second constructor argument, so the error printed as a raw tuple
      # instead of an interpolated message.
      raise ValueError('Invalid targets: %s' % ','.join(nonexistent))
    targets = dict((t, alltargets[t]) for t in targets_wanted)
  return targets
def UpdateToolchains(usepkg, deleteold, hostonly, reconfig,
                     targets_wanted, boards_wanted):
  """Performs all steps to create a synchronized toolchain enviroment.

  Args:
    arguments correspond to the given commandline flags
  """
  targets, crossdev_targets, reconfig_targets = {}, {}, {}
  if not hostonly:
    # For hostonly, we can skip most of the below logic, much of which won't
    # work on bare systems where this is useful.
    targets = ExpandTargets(targets_wanted)
    # Now re-add any targets that might be from this board. This is
    # to allow unofficial boards to declare their own toolchains.
    for board in boards_wanted:
      targets.update(toolchain.GetToolchainsForBoard(board))
    # First check and initialize all cross targets that need to be.
    # Already-initialized targets only need a (cheap) config refresh.
    for target in targets:
      if TargetIsInitialized(target):
        reconfig_targets[target] = targets[target]
      else:
        crossdev_targets[target] = targets[target]
    if crossdev_targets:
      print 'The following targets need to be re-initialized:'
      print crossdev_targets
      Crossdev.UpdateTargets(crossdev_targets, usepkg)
    # Those that were not initialized may need a config update.
    Crossdev.UpdateTargets(reconfig_targets, usepkg, config_only=True)
  # We want host updated.
  targets['host'] = {}
  # Now update all packages.
  if UpdateTargets(targets, usepkg) or crossdev_targets or reconfig:
    SelectActiveToolchains(targets, CONFIG_TARGET_SUFFIXES)
  if deleteold:
    CleanTargets(targets)
  # Now that we've cleared out old versions, see if we need to rebuild
  # anything.  Can't do this earlier as it might not be broken.
  RebuildLibtool()
def ShowBoardConfig(board):
  """Show the toolchain tuples used by |board|

  Args:
    board: The board to query.
  """
  toolchains = toolchain.GetToolchainsForBoard(board)
  # Make sure we display the default toolchain first.
  # Then the non-default ones, comma separated on the same line.
  print ','.join(
      toolchain.FilterToolchains(toolchains, 'default', True).keys() +
      toolchain.FilterToolchains(toolchains, 'default', False).keys())
def GeneratePathWrapper(root, wrappath, path):
  """Generate a shell script to execute another shell script

  Since we can't symlink a wrapped ELF (see GenerateLdsoWrapper) because the
  argv[0] won't be pointing to the correct path, generate a shell script that
  just executes another program with its full path.

  Args:
    root: The root tree to generate scripts inside of
    wrappath: The full path (inside |root|) to create the wrapper
    path: The target program which this wrapper will execute
  """
  replacements = {
    'path': path,
    'relroot': os.path.relpath('/', os.path.dirname(wrappath)),
  }
  # NOTE: '%%' is an escaped '%' for the %-format step below; the emitted
  # script contains ${base%/*}, i.e. POSIX "strip the last path component".
  wrapper = """#!/bin/sh
base=$(realpath "$0")
basedir=${base%%/*}
exec "${basedir}/%(relroot)s%(path)s" "$@"
""" % replacements
  root_wrapper = root + wrappath
  # Replace an existing symlink outright; otherwise make sure the dir exists.
  if os.path.islink(root_wrapper):
    os.unlink(root_wrapper)
  else:
    osutils.SafeMakedirs(os.path.dirname(root_wrapper))
  osutils.WriteFile(root_wrapper, wrapper)
  os.chmod(root_wrapper, 0o755)
def FileIsCrosSdkElf(elf):
  """Determine if |elf| is an ELF that we execute in the cros_sdk

  We don't need this to be perfect, just quick.  It makes sure the ELF
  is a 64bit LSB x86_64 ELF.  That is the native type of cros_sdk.

  Args:
    elf: The file to check

  Returns:
    True if we think |elf| is a native ELF
  """
  with open(elf) as f:
    header = f.read(20)
    # Check the magic number first so non-ELF files bail out early,
    # then EI_CLASS (64bit), EI_DATA (little endian), and e_machine (x86_64).
    if header[0:4] != '\x7fELF':
      return False
    return (header[4] == '\x02' and
            header[5] == '\x01' and
            header[18] == '\x3e')
def IsPathPackagable(ptype, path):
  """Should the specified file be included in a toolchain package?

  We only need to handle files as we'll create dirs as we need them.

  Further, trim files that won't be useful:
   - non-english translations (.mo) since it'd require env vars
   - debug files since these are for the host compiler itself
   - info/man pages as they're big, and docs are online, and the
     native docs should work fine for the most part (`man gcc`)

  Args:
    ptype: A string describing the path type (i.e. 'file' or 'dir' or 'sym')
    path: The full path to inspect

  Returns:
    True if we want to include this path in the package
  """
  if ptype in ('dir',):
    return False
  if path.startswith('/usr/lib/debug/'):
    return False
  if os.path.splitext(path)[1] == '.mo':
    return False
  if '/man/' in path or '/info/' in path:
    return False
  return True
def ReadlinkRoot(path, root):
    """Like os.readlink(), but relative to a |root|

    Args:
      path: The symlink to read
      root: The path to use for resolving absolute symlinks

    Returns:
      A fully resolved symlink path
    """
    # Chase the chain of links until we land on a non-symlink, resolving
    # each hop relative to |root| rather than the live filesystem root.
    while os.path.islink(root + path):
        target = os.readlink(root + path)
        path = os.path.join(os.path.dirname(path), target)
    return path
def _GetFilesForTarget(target, root='/'):
    """Locate all the files to package for |target|

    This does not cover ELF dependencies.

    Args:
      target: The toolchain target name
      root: The root path to pull all packages from

    Returns:
      A tuple of a set of all packable paths, and a set of all paths which
      are also native ELFs
    """
    paths = set()
    elfs = set()

    # Walk the files owned by each package belonging to this target.
    for pkg in GetTargetPackages(target):
        # Sysroot pieces (kernel/libc) ship separately; skip them here.
        if pkg in ('kernel', 'libc'):
            continue

        atom = GetPortagePackage(target, pkg)
        category, pkgname = atom.split('/')
        version = GetInstalledPackageVersions(atom)[0]
        cros_build_lib.Info('packaging %s-%s', atom, version)

        # pylint: disable=E1101
        dblink = portage.dblink(category, '%s-%s' % (pkgname, version),
                                myroot=root, settings=portage.settings)
        contents = dblink.getcontents()
        for obj, entry in contents.items():
            ptype = entry[0]
            if not IsPathPackagable(ptype, obj):
                continue

            # Native ELFs need their dependencies pulled in later.
            if ptype == 'obj' and FileIsCrosSdkElf(obj):
                elfs.add(obj)
            paths.add(obj)

    return paths, elfs
def _BuildInitialPackageRoot(output_dir, paths, elfs, ldpaths,
                             path_rewrite_func=lambda x:x, root='/'):
    """Link in all packable files and their runtime dependencies

    This also wraps up executable ELFs with helper scripts.

    Args:
      output_dir: The output directory to store files
      paths: All the files to include
      elfs: All the files which are ELFs (a subset of |paths|)
      ldpaths: A dict of static ldpath information
      path_rewrite_func: User callback to rewrite paths in output_dir
      root: The root path to pull all packages/files from
    """
    # Link in all the files.
    sym_paths = []
    for path in paths:
        new_path = path_rewrite_func(path)
        dst = output_dir + new_path
        osutils.SafeMakedirs(os.path.dirname(dst))

        # Is this a symlink which we have to rewrite or wrap?
        # Delay wrap check until after we have created all paths.
        src = root + path
        if os.path.islink(src):
            tgt = os.readlink(src)
            if os.path.sep in tgt:
                # Remember the fully resolved target so we can decide later
                # whether it points at an ELF that needs a wrapper script.
                sym_paths.append((new_path, lddtree.normpath(ReadlinkRoot(src, root))))

                # Rewrite absolute links to relative and then generate the symlink
                # ourselves. All other symlinks can be hardlinked below.
                if tgt[0] == '/':
                    tgt = os.path.relpath(tgt, os.path.dirname(new_path))
                os.symlink(tgt, dst)
                continue

        # Regular files (and same-directory symlinks) are hardlinked to
        # avoid copying the data.
        os.link(src, dst)

    # Now see if any of the symlinks need to be wrapped.
    for sym, tgt in sym_paths:
        if tgt in elfs:
            GeneratePathWrapper(output_dir, sym, tgt)

    # Locate all the dependencies for all the ELFs. Stick them all in the
    # top level "lib" dir to make the wrapper simpler. This exact path does
    # not matter since we execute ldso directly, and we tell the ldso the
    # exact path to search for its libraries.
    libdir = os.path.join(output_dir, 'lib')
    osutils.SafeMakedirs(libdir)
    donelibs = set()
    for elf in elfs:
        e = lddtree.ParseELF(elf, root=root, ldpaths=ldpaths)
        interp = e['interp']
        if interp:
            # Generate a wrapper if it is executable.
            interp = os.path.join('/lib', os.path.basename(interp))
            lddtree.GenerateLdsoWrapper(output_dir, path_rewrite_func(elf), interp,
                                        libpaths=e['rpath'] + e['runpath'])

        for lib, lib_data in e['libs'].iteritems():
            # Each library only needs to be linked in once.
            if lib in donelibs:
                continue

            src = path = lib_data['path']
            if path is None:
                # Best effort: warn but keep going if a dep can't be found.
                cros_build_lib.Warning('%s: could not locate %s', elf, lib)
                continue
            donelibs.add(lib)

            # Needed libs are the SONAME, but that is usually a symlink, not a
            # real file. So link in the target rather than the symlink itself.
            # We have to walk all the possible symlinks (SONAME could point to a
            # symlink which points to a symlink), and we have to handle absolute
            # ourselves (since we have a "root" argument).
            dst = os.path.join(libdir, os.path.basename(path))
            src = ReadlinkRoot(src, root)

            os.link(root + src, dst)
def _EnvdGetVar(envd, var):
    """Given a Gentoo env.d file, extract a var from it

    Args:
      envd: The env.d file to load (may be a glob path)
      var: The var to extract

    Returns:
      The value of |var|
    """
    matches = glob.glob(envd)
    # The glob must resolve to exactly one env.d file or the setup is broken.
    assert len(matches) == 1, '%s: should have exactly 1 env.d file' % envd
    return cros_build_lib.LoadKeyValueFile(matches[0])[var]
def _ProcessBinutilsConfig(target, output_dir):
    """Do what binutils-config would have done"""
    binpath = os.path.join('/bin', target + '-')

    # We always use the gold variant of binutils for this target.
    globpath = os.path.join(output_dir, 'usr', toolchain.GetHostTuple(), target,
                            'binutils-bin', '*-gold')
    matches = glob.glob(globpath)
    assert len(matches) == 1, '%s: did not match 1 path' % globpath
    srcpath = matches[0][len(output_dir):]

    gccpath = os.path.join('/usr', 'libexec', 'gcc')
    for prog in os.listdir(output_dir + srcpath):
        # Skip binaries already wrapped.
        if prog.endswith('.real'):
            continue
        wrapped = os.path.join(srcpath, prog)
        GeneratePathWrapper(output_dir, binpath + prog, wrapped)
        GeneratePathWrapper(output_dir, os.path.join(gccpath, prog), wrapped)

    # Point the target lib dir at the gold LIBPATH from env.d.
    libpath = os.path.join('/usr', toolchain.GetHostTuple(), target, 'lib')
    envd = os.path.join(output_dir, 'etc', 'env.d', 'binutils', '*-gold')
    srcpath = _EnvdGetVar(envd, 'LIBPATH')
    os.symlink(os.path.relpath(srcpath, os.path.dirname(libpath)),
               output_dir + libpath)
def _ProcessGccConfig(target, output_dir):
    """Do what gcc-config would have done"""
    binpath = '/bin'
    envd = os.path.join(output_dir, 'etc', 'env.d', 'gcc', '*')
    srcpath = _EnvdGetVar(envd, 'GCC_PATH')
    for prog in os.listdir(output_dir + srcpath):
        # Skip binaries already wrapped (.real/.elf backing files) and
        # anything not prefixed with the target tuple.
        if prog.endswith('.real') or prog.endswith('.elf'):
            continue
        if not prog.startswith(target):
            continue
        GeneratePathWrapper(output_dir, os.path.join(binpath, prog),
                            os.path.join(srcpath, prog))
    return srcpath
def _ProcessSysrootWrapper(_target, output_dir, srcpath):
    """Remove chroot-specific things from our sysroot wrapper"""
    # ccache only works inside the chroot, so hard-disable it.
    sysroot_wrapper = glob.glob(os.path.join(
        output_dir + srcpath, 'sysroot_wrapper*'))[0]
    contents = osutils.ReadFile(sysroot_wrapper).splitlines()
    for idx, line in enumerate(contents):
        if '@CCACHE_DEFAULT@' in line:
            contents[idx] = 'use_ccache = False'
            break
    # Can't update the wrapper in place since it's a hardlink to a file in /.
    os.unlink(sysroot_wrapper)
    osutils.WriteFile(sysroot_wrapper, '\n'.join(contents))
    os.chmod(sysroot_wrapper, 0o755)
def _ProcessDistroCleanups(target, output_dir):
    """Clean up the tree and remove all distro-specific requirements

    Args:
      target: The toolchain target name
      output_dir: The output directory to clean up
    """
    # Replay the work normally done by binutils-config and gcc-config.
    _ProcessBinutilsConfig(target, output_dir)
    gcc_path = _ProcessGccConfig(target, output_dir)
    _ProcessSysrootWrapper(target, output_dir, gcc_path)

    # The env.d tree only matters to Gentoo tooling; drop it entirely.
    osutils.RmDir(os.path.join(output_dir, 'etc'))
def CreatePackagableRoot(target, output_dir, ldpaths, root='/'):
    """Setup a tree from the packages for the specified target

    This populates a path with all the files from toolchain packages so that
    a tarball can easily be generated from the result.

    Args:
      target: The target to create a packagable root from
      output_dir: The output directory to place all the files
      ldpaths: A dict of static ldpath information
      root: The root path to pull all packages/files from
    """
    # Find all the files owned by the packages for this target.
    paths, elfs = _GetFilesForTarget(target, root=root)

    def _UsrBinToBin(path):
        """Move /usr/bin to /bin so people can just use that toplevel dir"""
        if path.startswith('/usr/bin/'):
            return path[4:]
        return path

    # Link in all the package's files, any ELF dependencies, and wrap any
    # executable ELFs with helper scripts.
    _BuildInitialPackageRoot(output_dir, paths, elfs, ldpaths,
                             path_rewrite_func=_UsrBinToBin, root=root)

    # The packages, when part of the normal distro, have helper scripts
    # that setup paths and such. Since we are making this standalone, we
    # need to preprocess all that ourselves.
    _ProcessDistroCleanups(target, output_dir)
def CreatePackages(targets_wanted, output_dir, root='/'):
    """Create redistributable cross-compiler packages for the specified targets

    This creates toolchain packages that should be usable in conjunction with
    a downloaded sysroot (created elsewhere).

    Tarballs (one per target) will be created in $PWD.

    Args:
      targets_wanted: The targets to package up.
      output_dir: The directory to put the packages in.
      root: The root path to pull all packages/files from.
    """
    osutils.SafeMakedirs(output_dir)
    ldpaths = lddtree.LoadLdpaths(root)
    targets = ExpandTargets(targets_wanted)

    with osutils.TempDir() as tempdir:
        # We have to split the root generation from the compression stages. This is
        # because we hardlink in all the files (to avoid overhead of reading/writing
        # the copies multiple times). But tar gets angry if a file's hardlink count
        # changes from when it starts reading a file to when it finishes.
        with parallel.BackgroundTaskRunner(CreatePackagableRoot) as queue:
            for target in targets:
                queue.put([target, os.path.join(tempdir, target), ldpaths, root])

        # Build the tarball.
        with parallel.BackgroundTaskRunner(cros_build_lib.CreateTarball) as queue:
            for target in targets:
                queue.put([os.path.join(output_dir, target + '.tar.xz'),
                           os.path.join(tempdir, target)])
def main(argv):
    """Entry point: parse the command line and run the requested action."""
    usage = """usage: %prog [options]
The script installs and updates the toolchains in your chroot."""
    parser = commandline.OptionParser(usage)
    parser.add_option('-u', '--nousepkg',
                      action='store_false', dest='usepkg', default=True,
                      help='Use prebuilt packages if possible')
    parser.add_option('-d', '--deleteold',
                      action='store_true', dest='deleteold', default=False,
                      help='Unmerge deprecated packages')
    parser.add_option('-t', '--targets',
                      dest='targets', default='sdk',
                      help='Comma separated list of tuples. '
                      'Special keyword \'host\' is allowed. Default: sdk')
    parser.add_option('--include-boards',
                      dest='include_boards', default='',
                      help='Comma separated list of boards whose toolchains we'
                      ' will always include. Default: none')
    parser.add_option('--hostonly',
                      dest='hostonly', default=False, action='store_true',
                      help='Only setup the host toolchain. '
                      'Useful for bootstrapping chroot')
    parser.add_option('--show-board-cfg',
                      dest='board_cfg', default=None,
                      help='Board to list toolchain tuples for')
    parser.add_option('--create-packages',
                      action='store_true', default=False,
                      help='Build redistributable packages')
    parser.add_option('--output-dir', default=os.getcwd(), type='path',
                      help='Output directory')
    parser.add_option('--reconfig', default=False, action='store_true',
                      help='Reload crossdev config and reselect toolchains')

    (options, remaining_arguments) = parser.parse_args(argv)
    # This script is option-driven only; positional args are a usage error.
    if len(remaining_arguments):
        parser.error('script does not take arguments: %s' % remaining_arguments)

    # Figure out what we're supposed to do and reject conflicting options.
    if options.board_cfg and options.create_packages:
        parser.error('conflicting options: create-packages & show-board-cfg')

    targets = set(options.targets.split(','))
    boards = set(options.include_boards.split(',')) if options.include_boards \
        else set()

    if options.board_cfg:
        # Informational mode: just print the board's toolchain tuples.
        ShowBoardConfig(options.board_cfg)
    elif options.create_packages:
        cros_build_lib.AssertInsideChroot()
        Crossdev.Load(False)
        CreatePackages(targets, options.output_dir)
    else:
        cros_build_lib.AssertInsideChroot()
        # This has to be always run as root.
        if os.geteuid() != 0:
            cros_build_lib.Die('this script must be run as root')
        Crossdev.Load(options.reconfig)
        UpdateToolchains(options.usepkg, options.deleteold, options.hostonly,
                         options.reconfig, targets, boards)
        # Persist any crossdev state changed by the update.
        Crossdev.Save()

    return 0
|
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import contextlib
import datetime
import functools
import re
import signal
from ccscli import LIST_TYPE
from ccscli import OBJECT_TYPE
from ccscli.compat import OrderedDict
import dateutil.parser
from dateutil.tz import tzlocal
from dateutil.tz import tzutc
# These are chars that do not need to be urlencoded based on rfc2986, section 2.3.
SAFE_CHARS = '-._~'
def get_service_module_name(service_model):
    """Return a Python-friendly module name for |service_model|.

    Strips the 'Cloudera'/'CCS' branding from the service name and then
    removes any remaining non-alphanumeric characters.

    Args:
        service_model: An object exposing a ``service_name`` attribute.

    Returns:
        The cleaned-up service name string.
    """
    name = service_model.service_name
    name = name.replace('Cloudera', '')
    name = name.replace('CCS', '')
    # Use a raw string: '\W' in a plain literal is an invalid escape
    # sequence (DeprecationWarning on modern Pythons). Pattern unchanged.
    name = re.sub(r'\W+', '', name)
    return name
def json_encoder(obj):
    """JSON encoder that formats datetimes as ISO8601 format."""
    # Only datetimes get special treatment; everything else passes through
    # untouched for the default encoder to handle.
    if not isinstance(obj, datetime.datetime):
        return obj
    return obj.isoformat()
class CachedProperty(object):
    """A read-only property that caches its value on first access.

    The computed value is stored in the instance ``__dict__`` under the
    wrapped function's name, so subsequent lookups find the plain
    attribute and bypass this descriptor entirely.
    """

    def __init__(self, fget):
        self._fget = fget

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = self._fget(obj)
        obj.__dict__[self._fget.__name__] = value
        return value
def instance_cache(func):
    """Method decorator for caching method calls to a single instance.

    **This is not a general purpose caching decorator.**

    The cache is scoped to each instance separately, and to keep things
    simple the instance *must* already provide an ``_instance_cache``
    dict attribute for the results to live in.
    """
    func_name = func.__name__

    @functools.wraps(func)
    def _cache_guard(self, *args, **kwargs):
        # Keyword args are folded into the key in sorted order so the same
        # call spelled in different orders hits the same cache slot.
        if kwargs:
            key = (func_name, args, tuple(sorted(kwargs.items())))
        else:
            key = (func_name, args)
        cached = self._instance_cache.get(key)
        # NOTE: a None result is never treated as a hit, so None-returning
        # calls are recomputed every time (mirrors the original behavior).
        if cached is not None:
            return cached
        value = func(self, *args, **kwargs)
        self._instance_cache[key] = value
        return value

    return _cache_guard
def parse_timestamp(value):
    """Parse a timestamp into a datetime object.

    Supported formats:

        * iso8601
        * rfc822
        * epoch (value is an integer)

    This will return a ``datetime.datetime`` object.
    """
    # Numeric values are epoch seconds in the local timezone.
    if isinstance(value, (int, float)):
        return datetime.datetime.fromtimestamp(value, tzlocal())
    # A string holding a number is also treated as epoch seconds.
    try:
        return datetime.datetime.fromtimestamp(float(value), tzlocal())
    except (TypeError, ValueError):
        pass
    # Fall back to the flexible dateutil parser for iso8601/rfc822 text.
    try:
        return dateutil.parser.parse(value)
    except (TypeError, ValueError) as e:
        raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
    """Convert |value| to a timezone-aware ``datetime.datetime``.

    This normalizes several kinds of timestamp input and always returns
    a datetime that carries tzinfo:

      * A datetime object (naive or aware)
      * An integer epoch time (or its string form, e.g. '0' instead of 0),
        which is considered to be UTC
      * An iso8601 formatted timestamp; just the date portion is enough,
        the time component may be omitted

    If the input has no timezone information, UTC is assumed — not local
    time.
    """
    # Normalize the input into a datetime object first.
    if isinstance(value, datetime.datetime):
        result = value
    else:
        # Documented as iso8601, though parse_timestamp also accepts
        # rfc822 text and epoch numbers.
        result = parse_timestamp(value)

    # Attach or convert the zone so the result is always aware and UTC-based.
    if result.tzinfo is None:
        # Naive datetimes are assumed UTC for backwards compatibility,
        # even though local time might arguably be more intuitive.
        return result.replace(tzinfo=tzutc())
    return result.astimezone(tzutc())
@contextlib.contextmanager
def ignore_ctrl_c():
    """Temporarily ignore SIGINT (Ctrl-C) for the duration of the block."""
    previous = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        yield
    finally:
        # Restore whatever handler was installed before we entered.
        signal.signal(signal.SIGINT, previous)
def datetime2timestamp(dt, default_timezone=None):
    """Calculate the timestamp based on the given datetime instance.

    :type dt: datetime
    :param dt: A datetime object to be converted into timestamp
    :type default_timezone: tzinfo
    :param default_timezone: If it is provided as None, we treat it as tzutc().
                             But it is only used when dt is a naive datetime.
    :returns: The timestamp
    """
    if dt.tzinfo is None:
        # Naive datetimes get the caller-supplied zone, defaulting to UTC.
        if default_timezone is None:
            default_timezone = tzutc()
        dt = dt.replace(tzinfo=default_timezone)

    epoch = datetime.datetime(1970, 1, 1)
    delta = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
    try:
        return delta.total_seconds()  # Works in Python 2.7+
    except AttributeError:
        # Manual computation for ancient Pythons without total_seconds().
        return (delta.microseconds +
                (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6
class ArgumentGenerator(object):
    """Generate sample input based on a shape model.

    ``generate_skeleton`` takes an input shape (created from
    ``ccscli.model``) and produces a sample dictionary matching that
    shape, filled with placeholder values: empty strings, 0/0.0 for
    numbers, an RFC822 date for datetimes.  The point is to show the
    *shape* of the input so users only have to fill in real data; in
    the future the defaults might come from the model.

    This is handy for operations with complex input shapes.

    Example usage::

        clidriver = CLIDriver
        ddb = clidriver.get_service_model('mastodon')
        arg_gen = ArgumentGenerator()
        sample_input = arg_gen.generate_skeleton(
            ddb.operation_model('createCluster').input_shape)
        print("Sample input for mastodon.createCluster: %s" % sample_input)
    """

    def __init__(self):
        pass

    def generate_skeleton(self, shape):
        """Return placeholder sample data matching |shape|."""
        type_name = shape.type_name
        if type_name == OBJECT_TYPE:
            return self._generate_type_object(shape)
        if type_name == LIST_TYPE:
            return self._generate_type_array(shape)
        if type_name == 'string':
            return ''
        if type_name in ['integer']:
            return 0
        if type_name == 'number':
            return 0.0
        if type_name == 'boolean':
            return True
        if type_name == 'datetime':
            return 'Wed, 02 Oct 2002 13:00:00 GMT'
        raise Exception("Unknown shape type: %s" % type_name)

    def _generate_type_object(self, shape):
        # Preserve the member order defined by the model.
        return OrderedDict(
            (member_name, self.generate_skeleton(member_shape))
            for member_name, member_shape in shape.members.items())

    def _generate_type_array(self, shape):
        # For list elements we've arbitrarily decided to return a single
        # element illustrating the list's member shape.
        return [self.generate_skeleton(shape.member)]
|
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from io import BytesIO
from optparse import make_option, OptionParser
import traceback
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
    """
    Signals a problem while executing a management command.

    Raising this during the execution of a management command causes it
    to be caught and reported as a nicely-printed error message on the
    appropriate output stream (i.e., stderr) rather than a traceback;
    raising it (with a sensible description of the error) is therefore
    the preferred way for a command to indicate that something has gone
    wrong.
    """
    pass
def handle_default_options(options):
    """
    Apply the options every command accepts (``--settings`` and
    ``--pythonpath``) so that ManagementUtility can handle them before
    searching for user commands.
    """
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    if options.pythonpath:
        sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
    """
    Thin wrapper around a stdout/stderr stream that applies optional
    styling and guarantees a trailing line ending on writes.
    """

    def __init__(self, out, style_func=None, ending='\n'):
        self._out = out
        self.ending = ending
        # Only style output that is going to a real terminal.
        self.style_func = None
        if hasattr(out, 'isatty') and out.isatty():
            self.style_func = style_func

    def __getattr__(self, name):
        # Anything we don't define is delegated to the wrapped stream.
        return getattr(self._out, name)

    def write(self, msg, style_func=None, ending=None):
        if ending is None:
            ending = self.ending
        if ending and not msg.endswith(ending):
            msg += ending
        # Pick the first available styling callable, falling back to identity.
        if style_func is None:
            style_func = self.style_func
        if style_func is None:
            style_func = lambda text: text
        self._out.write(smart_str(style_func(msg)))
class BaseCommand(object):
    """
    The base class from which all management commands ultimately
    derive.

    Use this class if you want access to all of the mechanisms which
    parse the command-line arguments and work out what code to call in
    response; if you don't need to change any of that behavior,
    consider using one of the subclasses defined in this file.

    If you are interested in overriding/customizing various aspects of
    the command-parsing and -execution behavior, the normal flow works
    as follows:

    1. ``django-admin.py`` or ``manage.py`` loads the command class
       and calls its ``run_from_argv()`` method.

    2. The ``run_from_argv()`` method calls ``create_parser()`` to get
       an ``OptionParser`` for the arguments, parses them, performs
       any environment changes requested by options like
       ``pythonpath``, and then calls the ``execute()`` method,
       passing the parsed arguments.

    3. The ``execute()`` method attempts to carry out the command by
       calling the ``handle()`` method with the parsed arguments; any
       output produced by ``handle()`` will be printed to standard
       output and, if the command is intended to produce a block of
       SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.

    4. If ``handle()`` or ``execute()`` raised any exception (e.g.
       ``CommandError``), ``run_from_argv()`` will instead print an error
       message to ``stderr``.

    Thus, the ``handle()`` method is typically the starting point for
    subclasses; many built-in commands and command types either place
    all of their logic in ``handle()``, or perform some additional
    parsing work in ``handle()`` and then delegate from it to more
    specialized methods as needed.

    Several attributes affect behavior at various steps along the way:

    ``args``
      A string listing the arguments accepted by the command,
      suitable for use in help messages; e.g., a command which takes
      a list of application names might set this to '<appname
      appname ...>'.

    ``can_import_settings``
      A boolean indicating whether the command needs to be able to
      import Django settings; if ``True``, ``execute()`` will verify
      that this is possible before proceeding. Default value is
      ``True``.

    ``help``
      A short description of the command, which will be printed in
      help messages.

    ``option_list``
      This is the list of ``optparse`` options which will be fed
      into the command's ``OptionParser`` for parsing arguments.

    ``output_transaction``
      A boolean indicating whether the command outputs SQL
      statements; if ``True``, the output will automatically be
      wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
      ``False``.

    ``requires_model_validation``
      A boolean; if ``True``, validation of installed models will be
      performed prior to executing the command. Default value is
      ``True``. To validate an individual application's models
      rather than all applications' models, call
      ``self.validate(app)`` from ``handle()``, where ``app`` is the
      application's Python module.
    """
    # Metadata about this command.
    option_list = (
        make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
                    type='choice', choices=['0', '1', '2', '3'],
                    help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
        make_option('--settings',
                    help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
        make_option('--pythonpath',
                    help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
        make_option('--traceback', action='store_true',
                    help='Print traceback on exception'),
    )
    help = ''
    args = ''

    # Configuration shortcuts that alter various logic.
    can_import_settings = True
    requires_model_validation = True
    output_transaction = False  # Whether to wrap the output in a "BEGIN; COMMIT;"

    def __init__(self):
        # Style object used to colorize output when writing to a terminal.
        self.style = color_style()

    def get_version(self):
        """
        Return the Django version, which should be correct for all
        built-in Django commands. User-supplied commands should
        override this method.
        """
        return django.get_version()

    def usage(self, subcommand):
        """
        Return a brief description of how to use this command, by
        default from the attribute ``self.help``.
        """
        usage = '%%prog %s [options] %s' % (subcommand, self.args)
        if self.help:
            return '%s\n\n%s' % (usage, self.help)
        else:
            return usage

    def create_parser(self, prog_name, subcommand):
        """
        Create and return the ``OptionParser`` which will be used to
        parse the arguments to this command.
        """
        return OptionParser(prog=prog_name,
                            usage=self.usage(subcommand),
                            version=self.get_version(),
                            option_list=self.option_list)

    def print_help(self, prog_name, subcommand):
        """
        Print the help message for this command, derived from
        ``self.usage()``.
        """
        parser = self.create_parser(prog_name, subcommand)
        parser.print_help()

    def run_from_argv(self, argv):
        """
        Set up any environment changes requested (e.g., Python path
        and Django settings), then run this command. If the
        command raises a ``CommandError``, intercept it and print it sensibly
        to stderr.
        """
        # argv[0] is the program name, argv[1] the subcommand name.
        parser = self.create_parser(argv[0], argv[1])
        options, args = parser.parse_args(argv[2:])
        handle_default_options(options)
        try:
            self.execute(*args, **options.__dict__)
        except Exception as e:
            # NOTE(review): self.stderr is assigned inside execute(); if
            # execute() raised before reaching that assignment, this handler
            # would itself fail with AttributeError -- confirm intended.
            if options.traceback:
                self.stderr.write(traceback.format_exc())
            self.stderr.write('%s: %s' % (e.__class__.__name__, e))
            sys.exit(1)

    def execute(self, *args, **options):
        """
        Try to execute this command, performing model validation if
        needed (as controlled by the attribute
        ``self.requires_model_validation``, except if force-skipped).
        """
        # Switch to English, because django-admin.py creates database content
        # like permissions, and those shouldn't contain any translations.
        # But only do this if we can assume we have a working settings file,
        # because django.utils.translation requires settings.
        saved_lang = None
        self.stdout = OutputWrapper(options.get('stdout', sys.stdout))
        self.stderr = OutputWrapper(options.get('stderr', sys.stderr), self.style.ERROR)

        if self.can_import_settings:
            from django.utils import translation
            saved_lang = translation.get_language()
            translation.activate('en-us')

        try:
            if self.requires_model_validation and not options.get('skip_validation'):
                self.validate()
            output = self.handle(*args, **options)
            if output:
                if self.output_transaction:
                    # This needs to be imported here, because it relies on
                    # settings.
                    from django.db import connections, DEFAULT_DB_ALIAS
                    connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
                    if connection.ops.start_transaction_sql():
                        self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
                self.stdout.write(output)
                if self.output_transaction:
                    self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;"))
        finally:
            # Restore the language that was active before we forced English.
            if saved_lang is not None:
                translation.activate(saved_lang)

    def validate(self, app=None, display_num_errors=False):
        """
        Validates the given app, raising CommandError for any errors.

        If app is None, then this will validate all installed apps.
        """
        from django.core.management.validation import get_validation_errors
        s = BytesIO()
        num_errors = get_validation_errors(s, app)
        if num_errors:
            s.seek(0)
            error_text = s.read()
            raise CommandError("One or more models did not validate:\n%s" % error_text)
        if display_num_errors:
            self.stdout.write("%s error%s found" % (num_errors, num_errors != 1 and 's' or ''))

    def handle(self, *args, **options):
        """
        The actual logic of the command. Subclasses must implement
        this method.
        """
        raise NotImplementedError()
class AppCommand(BaseCommand):
    """
    A management command which takes one or more installed application
    names as arguments, and does something with each of them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_app()``, which will be called once for each application.
    """
    args = '<appname appname ...>'

    def handle(self, *app_labels, **options):
        from django.db import models
        if not app_labels:
            raise CommandError('Enter at least one appname.')
        try:
            app_list = [models.get_app(app_label) for app_label in app_labels]
        except (ImproperlyConfigured, ImportError) as e:
            raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)

        # Run each app through the subclass hook, keeping non-empty output.
        pieces = []
        for app in app_list:
            result = self.handle_app(app, **options)
            if result:
                pieces.append(result)
        return '\n'.join(pieces)

    def handle_app(self, app, **options):
        """
        Perform the command's actions for ``app``, which will be the
        Python module corresponding to an application name given on
        the command line.
        """
        raise NotImplementedError()
class LabelCommand(BaseCommand):
    """
    A management command which takes one or more arbitrary arguments
    (labels) on the command line, and does something with each of
    them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_label()``, which will be called once for each label.

    If the arguments should be names of installed applications, use
    ``AppCommand`` instead.
    """
    args = '<label label ...>'
    label = 'label'

    def handle(self, *labels, **options):
        if not labels:
            raise CommandError('Enter at least one %s.' % self.label)

        # Run each label through the subclass hook, keeping non-empty output.
        results = (self.handle_label(label, **options) for label in labels)
        return '\n'.join(r for r in results if r)

    def handle_label(self, label, **options):
        """
        Perform the command's actions for ``label``, which will be the
        string as given on the command line.
        """
        raise NotImplementedError()
class NoArgsCommand(BaseCommand):
    """
    A command which takes no arguments on the command line.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_noargs()``; ``handle()`` itself is overridden to ensure
    no arguments are passed to the command.

    Attempting to pass arguments will raise ``CommandError``.
    """
    args = ''

    def handle(self, *args, **options):
        # Reject positional arguments outright before delegating.
        if args:
            raise CommandError("Command doesn't accept any arguments")
        return self.handle_noargs(**options)

    def handle_noargs(self, **options):
        """
        Perform this command's actions.
        """
        raise NotImplementedError()
|
|
"""
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape, format_html, format_html_join, smart_urlquote
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
from django.utils import six
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page
    """

    @property
    def media(self):
        # SelectFilter2 depends on core.js and SelectBox.js being loaded first.
        scripts = ("core.js", "SelectBox.js", "SelectFilter2.js")
        return forms.Media(js=[static("admin/js/%s" % name) for name in scripts])

    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None, choices=()):
        if attrs is None:
            attrs = {}
        # The CSS class is what triggers the JS filter widget; stacked
        # variants use the combined "selectfilterstacked" class.
        attrs['class'] = 'selectfilter'
        if self.is_stacked:
            attrs['class'] += 'stacked'
        pieces = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
        pieces.append('<script type="text/javascript">addEvent(window, "load", function(e) {')
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        pieces.append('SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n'
                      % (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), static('admin/')))
        return mark_safe(''.join(pieces))
class AdminDateWidget(forms.DateInput):
    """Date input styled for the admin (``vDateField``) with calendar JS."""
    @property
    def media(self):
        scripts = ("calendar.js", "admin/DateTimeShortcuts.js")
        return forms.Media(js=[static("admin/js/%s" % script) for script in scripts])
    def __init__(self, attrs=None, format=None):
        # Caller-supplied attrs override the admin defaults.
        defaults = {'class': 'vDateField', 'size': '10'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=defaults, format=format)
class AdminTimeWidget(forms.TimeInput):
    """Time input styled for the admin (``vTimeField``) with clock shortcuts JS."""
    @property
    def media(self):
        scripts = ("calendar.js", "admin/DateTimeShortcuts.js")
        return forms.Media(js=[static("admin/js/%s" % script) for script in scripts])
    def __init__(self, attrs=None, format=None):
        # Caller-supplied attrs override the admin defaults.
        defaults = {'class': 'vTimeField', 'size': '8'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=defaults, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        # Use the admin-styled date/time sub-widgets instead of the defaults.
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)
    def format_output(self, rendered_widgets):
        # Wrap both sub-widgets in one <p> with translated Date:/Time: labels.
        return format_html('<p class="datetime">{0} {1}<br />{2} {3}</p>',
                           _('Date:'), rendered_widgets[0],
                           _('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        # self.attrs are flattened onto the <ul>; each radio input becomes
        # an escaped <li> entry.
        return format_html('<ul{0}>\n{1}\n</ul>',
                           flatatt(self.attrs),
                           format_html_join('\n', '<li>{0}</li>',
                                            ((force_text(w),) for w in self)))
# RadioSelect whose choices are rendered as a <ul> by AdminRadioFieldRenderer.
class AdminRadioSelect(forms.RadioSelect):
    renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
    # Wrap the stock clearable-file-input markup in admin-specific containers
    # so the admin CSS can style it.
    template_with_initial = ('<p class="file-upload">%s</p>'
                            % forms.ClearableFileInput.template_with_initial)
    template_with_clear = ('<span class="clearable-file-input">%s</span>'
                           % forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    params = {}
    # Only mapping-like lookup specifications can be converted; anything else
    # (None, empty, callables, Q objects) yields no parameters.
    if lookups and hasattr(lookups, 'items'):
        for field, value in lookups.items():
            if isinstance(value, (tuple, list)):
                # Sequences are serialized as a comma-separated string.
                value = ','.join(str(item) for item in value)
            elif isinstance(value, bool):
                # See django.db.fields.BooleanField.get_prep_lookup
                value = '1' if value else '0'
            else:
                value = six.text_type(value)
            params[field] = value
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    def __init__(self, rel, admin_site, attrs=None, using=None):
        # rel: the ForeignKey's relation descriptor (gives access to the
        # target model); using: optional database alias for label lookups.
        self.rel = rel
        self.admin_site = admin_site
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)
    def render(self, name, value, attrs=None):
        rel_to = self.rel.to
        if attrs is None:
            attrs = {}
        extra = []
        if rel_to in self.admin_site._registry:
            # The related object is registered with the same AdminSite
            related_url = reverse('admin:%s_%s_changelist' %
                                  (rel_to._meta.app_label,
                                   rel_to._meta.model_name),
                                  current_app=self.admin_site.name)
            # Pre-filter the changelist popup with limit_choices_to / to-field
            # parameters, when any exist.
            params = self.url_parameters()
            if params:
                url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
            else:
                url = ''
            if "class" not in attrs:
                attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript code looks for this hook.
            # TODO: "lookup_id_" is hard-coded here. This should instead use
            # the correct API to determine the ID dynamically.
            extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> '
                         % (related_url, url, name))
            extra.append('<img src="%s" width="16" height="16" alt="%s" /></a>'
                         % (static('admin/img/selector-search.gif'), _('Lookup')))
        output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
        if value:
            # Append a human-readable label for the currently selected pk.
            output.append(self.label_for_value(value))
        return mark_safe(''.join(output))
    def base_url_parameters(self):
        # Query parameters derived from the field's limit_choices_to.
        return url_params_from_lookup_dict(self.rel.limit_choices_to)
    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params
    def label_for_value(self, value):
        key = self.rel.get_related_field().name
        try:
            # Truncated string representation of the related object; empty
            # string if the value is invalid or the object no longer exists.
            obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
            return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
        except (ValueError, self.rel.to.DoesNotExist):
            return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        if self.rel.to in self.admin_site._registry:
            # The related object is registered with the same AdminSite
            attrs['class'] = 'vManyToManyRawIdAdminField'
        if value:
            # Multiple selections are rendered as a comma-separated pk list.
            value = ','.join([force_text(v) for v in value])
        else:
            value = ''
        return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
    def url_parameters(self):
        # Unlike the FK widget, no TO_FIELD parameter is added for M2M popups.
        return self.base_url_parameters()
    def label_for_value(self, value):
        # Labeling many values would be ambiguous; show nothing.
        return ''
    def value_from_datadict(self, data, files, name):
        value = data.get(name)
        if value:
            return value.split(',')
        # Implicitly returns None when the field is absent or empty.
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.
    """
    def __init__(self, widget, rel, admin_site, can_add_related=None):
        # Mirror the wrapped widget's public attributes so form machinery that
        # inspects the widget keeps working.
        self.is_hidden = widget.is_hidden
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.to in admin_site._registry
        self.can_add_related = can_add_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site
    def __deepcopy__(self, memo):
        # Deep-copy only the wrapped widget; shallow-copy the wrapper itself.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj
    @property
    def media(self):
        return self.widget.media
    def render(self, name, value, *args, **kwargs):
        rel_to = self.rel.to
        info = (rel_to._meta.app_label, rel_to._meta.model_name)
        self.widget.choices = self.choices
        output = [self.widget.render(name, value, *args, **kwargs)]
        if self.can_add_related:
            related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
            # TODO: "add_id_" is hard-coded here. This should instead use the
            # correct API to determine the ID dynamically.
            output.append('<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> '
                          % (related_url, name))
            output.append('<img src="%s" width="10" height="10" alt="%s"/></a>'
                          % (static('admin/img/icon_addlink.gif'), _('Add Another')))
        return mark_safe(''.join(output))
    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # Bug fix: forward the caller-supplied extra_attrs instead of
        # discarding it by hard-coding extra_attrs=None in the delegated call.
        self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
        return self.attrs
    def value_from_datadict(self, data, files, name):
        return self.widget.value_from_datadict(data, files, name)
    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea restyled for the admin via the ``vLargeTextField`` CSS class."""
    def __init__(self, attrs=None):
        # Caller-supplied attrs override the admin defaults.
        defaults = {'class': 'vLargeTextField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminTextareaWidget, self).__init__(attrs=defaults)
class AdminTextInputWidget(forms.TextInput):
    """Text input restyled for the admin via the ``vTextField`` CSS class."""
    def __init__(self, attrs=None):
        # Caller-supplied attrs override the admin defaults.
        defaults = {'class': 'vTextField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminTextInputWidget, self).__init__(attrs=defaults)
class AdminURLFieldWidget(forms.TextInput):
    # URL input that also shows the current value as a clickable link.
    def __init__(self, attrs=None):
        final_attrs = {'class': 'vURLField'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
    def render(self, name, value, attrs=None):
        html = super(AdminURLFieldWidget, self).render(name, value, attrs)
        if value:
            # Display "Currently: <link>" above the editable input; the href
            # is URL-quoted while the visible text is escaped by format_html.
            value = force_text(self._format_value(value))
            final_attrs = {'href': mark_safe(smart_urlquote(value))}
            html = format_html(
                '<p class="url">{0} <a {1}>{2}</a><br />{3} {4}</p>',
                _('Currently:'), flatatt(final_attrs), value,
                _('Change:'), html
            )
        return html
class AdminIntegerFieldWidget(forms.TextInput):
    """Integer text input; subclasses restyle it by overriding ``class_name``."""
    class_name = 'vIntegerField'
    def __init__(self, attrs=None):
        # Caller-supplied attrs override the admin defaults.
        defaults = {'class': self.class_name}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminIntegerFieldWidget, self).__init__(attrs=defaults)
# Same behaviour as AdminIntegerFieldWidget; only the CSS hook differs.
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
    class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input for comma-separated integers (``vCommaSeparatedIntegerField``)."""
    def __init__(self, attrs=None):
        # Caller-supplied attrs override the admin defaults.
        defaults = {'class': 'vCommaSeparatedIntegerField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=defaults)
|
|
#!/usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
"""
File format example: test_spec.json:
{
"targets": {
"KL46Z": ["ARM", "GCC_ARM"],
"LPC1768": ["ARM", "GCC_ARM", "IAR"],
"LPC11U24": ["uARM"],
"NRF51822": ["ARM"]
}
}
File format example: muts_all.json:
{
"1" : {"mcu": "LPC1768",
"port":"COM4",
"disk":"J:\\",
"peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
},
"2" : {"mcu": "KL25Z",
"port":"COM7",
"disk":"G:\\",
"peripherals": ["digital_loop", "port_loop", "analog_loop"]
}
}
"""
# Be sure that the tools directory is in the search path
import sys
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
# Check: Extra modules which are required by core test suite
from tools.utils import check_required_modules
check_required_modules(['prettytable', 'serial'])
# Imports related to mbed build api
from tools.build_api import mcu_toolchain_matrix
# Imports from TEST API
from tools.test_api import SingleTestRunner
from tools.test_api import singletest_in_cli_mode
from tools.test_api import get_json_data_from_file
from tools.test_api import get_avail_tests_summary_table
from tools.test_api import get_default_test_options_parser
from tools.test_api import print_muts_configuration_from_json
from tools.test_api import print_test_configuration_from_json
from tools.test_api import get_autodetected_MUTS_list
from tools.test_api import get_autodetected_TEST_SPEC
from tools.test_api import get_module_avail
from tools.test_exporters import ReportExporter, ResultExporterType
# Importing extra modules which can be not installed but if available they can extend test suite functionality
try:
    import mbed_lstools
    from tools.compliance.ioper_runner import IOperTestRunner
    from tools.compliance.ioper_runner import get_available_oper_test_scopes
except ImportError:
    # These modules are optional: when absent, the related features are simply
    # unavailable (get_module_avail() is used later to detect them). Catching
    # only ImportError avoids hiding unrelated errors raised at import time.
    pass
def get_version():
    """ Returns test script version
    """
    major, minor = 1, 5
    return (major, minor)
if __name__ == '__main__':
    # Command line options
    parser = get_default_test_options_parser()
    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""
    opts = parser.parse_args()
    # Print script version
    if opts.version:
        print(parser.description)
        print(parser.epilog)
        print("Version %d.%d"% get_version())
        exit(0)
    # Print summary / information about automation test status
    if opts.test_automation_report:
        print(get_avail_tests_summary_table(platform_filter=opts.general_filter_regex))
        exit(0)
    # Print summary / information about automation test status
    if opts.test_case_report:
        test_case_report_cols = ['id',
                                 'automated',
                                 'description',
                                 'peripherals',
                                 'host_test',
                                 'duration',
                                 'source_dir']
        print(get_avail_tests_summary_table(cols=test_case_report_cols,
                                            result_summary=False,
                                            join_delim='\n',
                                            platform_filter=opts.general_filter_regex))
        exit(0)
    # Only prints matrix of supported toolchains
    if opts.supported_toolchains:
        print(mcu_toolchain_matrix(platform_filter=opts.general_filter_regex))
        exit(0)
    test_spec = None
    MUTs = None
    if hasattr(opts, 'auto_detect') and opts.auto_detect:
        # If auto_detect attribute is present, we assume other auto-detection
        # parameters like 'toolchains_filter' are also set.
        print("MBEDLS: Detecting connected mbed-enabled devices... ")
        MUTs = get_autodetected_MUTS_list()
        for mut in MUTs.values():
            print("MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu'],
                                                                 mut['port'],
                                                                 mut['disk']))
        # Set up parameters for test specification filter function (we need to set toolchains per target here)
        use_default_toolchain = 'default' in opts.toolchains_filter if opts.toolchains_filter is not None else True
        use_supported_toolchains = 'all' in opts.toolchains_filter if opts.toolchains_filter is not None else False
        toolchain_filter = opts.toolchains_filter
        # Simplified: the original conditional returned the same value on both
        # branches, so it reduces to a plain assignment.
        platform_name_filter = opts.general_filter_regex
        # Test specification with information about each target and associated toolchain
        test_spec = get_autodetected_TEST_SPEC(MUTs.values(),
                                               use_default_toolchain=use_default_toolchain,
                                               use_supported_toolchains=use_supported_toolchains,
                                               toolchain_filter=toolchain_filter,
                                               platform_name_filter=platform_name_filter)
    else:
        # Open file with test specification
        # test_spec_filename tells script which targets and their toolchain(s)
        # should be covered by the test scenario
        opts.auto_detect = False
        test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
        if test_spec is None:
            if not opts.test_spec_filename:
                parser.print_help()
            exit(-1)
        # Get extra MUTs if applicable
        MUTs = get_json_data_from_file(opts.muts_spec_filename) if opts.muts_spec_filename else None
        if MUTs is None:
            if not opts.muts_spec_filename:
                parser.print_help()
            exit(-1)
    if opts.verbose_test_configuration_only:
        print("MUTs configuration in %s:" % ('auto-detected' if opts.auto_detect else opts.muts_spec_filename))
        if MUTs:
            print(print_muts_configuration_from_json(MUTs, platform_filter=opts.general_filter_regex))
        # Bug fix: under the Python 2 interpreter named in the shebang, a bare
        # print() would print "()"; print an empty string for a blank line.
        print("")
        print("Test specification in %s:" % ('auto-detected' if opts.auto_detect else opts.test_spec_filename))
        if test_spec:
            print(print_test_configuration_from_json(test_spec))
        exit(0)
    if get_module_avail('mbed_lstools'):
        if opts.operability_checks:
            # Check if test scope is valid and run tests
            test_scope = get_available_oper_test_scopes()
            if opts.operability_checks in test_scope:
                tests = IOperTestRunner(scope=opts.operability_checks)
                test_results = tests.run()
                # Export results in form of JUnit XML report to separate file
                if opts.report_junit_file_name:
                    report_exporter = ReportExporter(ResultExporterType.JUNIT_OPER)
                    report_exporter.report_to_file(test_results, opts.report_junit_file_name)
            else:
                print("Unknown interoperability test scope name: '%s'" % (opts.operability_checks))
                print("Available test scopes: %s" % (','.join(["'%s'" % n for n in test_scope])))
            exit(0)
    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print(print_muts_configuration_from_json(MUTs))
    if test_spec and opts.verbose:
        print(print_test_configuration_from_json(test_spec))
    if opts.only_build_tests:
        # We are skipping testing phase, and suppress summary
        opts.suppress_summary = True
    single_test = SingleTestRunner(_global_loops_count=opts.test_global_loops_value,
                                   _test_loops_list=opts.test_loops_list,
                                   _muts=MUTs,
                                   _clean=opts.clean,
                                   _parser=parser,
                                   _opts=opts,
                                   _opts_log_file_name=opts.log_file_name,
                                   _opts_report_html_file_name=opts.report_html_file_name,
                                   _opts_report_junit_file_name=opts.report_junit_file_name,
                                   _opts_report_build_file_name=opts.report_build_file_name,
                                   _opts_report_text_file_name=opts.report_text_file_name,
                                   _test_spec=test_spec,
                                   _opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
                                   _opts_goanna_for_tests=opts.goanna_for_tests,
                                   _opts_shuffle_test_order=opts.shuffle_test_order,
                                   _opts_shuffle_test_seed=opts.shuffle_test_seed,
                                   _opts_test_by_names=opts.test_by_names,
                                   _opts_peripheral_by_names=opts.peripheral_by_names,
                                   _opts_test_only_peripheral=opts.test_only_peripheral,
                                   _opts_test_only_common=opts.test_only_common,
                                   _opts_verbose_skipped_tests=opts.verbose_skipped_tests,
                                   _opts_verbose_test_result_only=opts.verbose_test_result_only,
                                   _opts_verbose=opts.verbose,
                                   _opts_firmware_global_name=opts.firmware_global_name,
                                   _opts_only_build_tests=opts.only_build_tests,
                                   _opts_parallel_test_exec=opts.parallel_test_exec,
                                   _opts_suppress_summary=opts.suppress_summary,
                                   _opts_test_x_toolchain_summary=opts.test_x_toolchain_summary,
                                   _opts_copy_method=opts.copy_method,
                                   _opts_mut_reset_type=opts.mut_reset_type,
                                   _opts_jobs=opts.jobs,
                                   _opts_waterfall_test=opts.waterfall_test,
                                   _opts_consolidate_waterfall_test=opts.consolidate_waterfall_test,
                                   _opts_extend_test_timeout=opts.extend_test_timeout,
                                   _opts_auto_detect=opts.auto_detect)
    # Runs test suite in CLI mode
    if (singletest_in_cli_mode(single_test)):
        exit(0)
    else:
        exit(-1)
|
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')
# Shorthand aliases into the shared test-utils module.
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
  process = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if process.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """
  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.
    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    # Build a human-readable description of the environment for the message.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    has_seg_fault = Run(command)
    # Restore the environment before asserting, so a failure here does not
    # leak state into the next test.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)
  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)
  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)
  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)
  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""
    # All four combinations: the flag value alone decides the outcome.
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)
  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""
      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
  # Discover and run the TestCase classes above via the shared runner.
  gtest_test_utils.Main()
|
|
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.common.utils import classproperty
from potentials import LammpsPotential
import numpy as np
def generate_LAMMPS_structure(structure):
    """Return the contents of a LAMMPS data file describing *structure*.

    Emits the atom/type counts, the (possibly triclinic) simulation box with
    tilt factors, per-type masses and the atomic positions, in the layout
    expected by the LAMMPS ``read_data`` command.
    """
    import numpy as np
    kind_names = [site.kind_name for site in structure.sites]
    # Index of the first occurrence of each kind (np.unique sorts by value),
    # turned into per-kind run lengths, then into a per-atom type index.
    first_occurrence = np.unique(kind_names, return_index=True)[1]
    kind_counts = np.diff(np.append(first_occurrence, [len(kind_names)]))
    type_of_atom = []
    for type_id, count in enumerate(kind_counts):
        type_of_atom += [type_id for _ in range(count)]
    masses = [kind.mass for kind in structure.kinds]
    positions = [site.position for site in structure.sites]
    # Convert the cell vectors into LAMMPS box lengths and tilt factors.
    cell = np.array(structure.cell)
    a = np.linalg.norm(cell[0])
    b = np.linalg.norm(cell[1])
    c = np.linalg.norm(cell[2])
    alpha = np.arccos(np.dot(cell[1], cell[2]) / (c * b))
    gamma = np.arccos(np.dot(cell[1], cell[0]) / (a * b))
    beta = np.arccos(np.dot(cell[2], cell[0]) / (a * c))
    xhi = a
    xy = b * np.cos(gamma)
    xz = c * np.cos(beta)
    yhi = np.sqrt(pow(b, 2) - pow(xy, 2))
    yz = (b * c * np.cos(alpha) - xy * xz) / yhi
    zhi = np.sqrt(pow(c, 2) - pow(xz, 2) - pow(yz, 2))
    # Extend the upper box bounds by the (non-negative) tilt contributions.
    xhi = xhi + max(0, 0, xy, xz, xy + xz)
    yhi = yhi + max(0, 0, yz)
    lines = ['Generated using dynaphopy\n\n']
    lines.append('{0} atoms\n\n'.format(len(positions)))
    lines.append('{0} atom types\n\n'.format(len(masses)))
    lines.append('\n{0:20.10f} {1:20.10f} xlo xhi\n'.format(0, xhi))
    lines.append('{0:20.10f} {1:20.10f} ylo yhi\n'.format(0, yhi))
    lines.append('{0:20.10f} {1:20.10f} zlo zhi\n'.format(0, zhi))
    lines.append('{0:20.10f} {1:20.10f} {2:20.10f} xy xz yz\n\n'.format(xy, xz, yz))
    lines.append('Masses\n\n')
    for type_id, mass in enumerate(masses):
        lines.append('{0} {1:20.10f} \n'.format(type_id + 1, mass))
    lines.append('\nAtoms\n\n')
    for atom_id, row in enumerate(positions):
        lines.append('{0} {1} {2:20.10f} {3:20.10f} {4:20.10f}\n'.format(
            atom_id + 1, type_of_atom[atom_id] + 1, row[0], row[1], row[2]))
    return ''.join(lines)
def generate_LAMMPS_input(parameters,
                          potential_obj,
                          structure_file='potential.pot',
                          trajectory_file='trajectory.lammpstr'):
    """Return the text of a LAMMPS NVT molecular-dynamics input script.

    parameters    -- node whose ``dict`` attribute supplies timestep,
                     temperature, thermostat_variable, equilibrium_steps,
                     dump_rate and total_steps.
    potential_obj -- supplies ``_names`` (element symbols, in type order)
                     and ``get_input_potential_lines()``.
    NOTE(review): the default file names look swapped/truncated
    ('potential.pot' for the structure, 'trajectory.lammpstr' vs the usual
    '.lammpstrj'); callers always override them, so they are kept as-is.
    """
    # Seed for the initial velocity distribution (fresh on every call).
    seed = np.random.randint(10000000)
    element_names = ' '.join(potential_obj._names)
    md = parameters.dict
    lines = []
    lines.append('units metal\n')
    lines.append('boundary p p p\n')
    lines.append('box tilt large\n')
    lines.append('atom_style atomic\n')
    lines.append('read_data {}\n'.format(structure_file))
    lines.append(potential_obj.get_input_potential_lines())
    lines.append('neighbor 0.3 bin\n')
    lines.append('neigh_modify every 1 delay 0 check no\n')
    lines.append('timestep {}\n'.format(md.timestep))
    lines.append('thermo_style custom step etotal temp vol press\n')
    lines.append('thermo 1000\n')
    # Equilibration: Gaussian velocities, rescale, then NVT for a burn-in run.
    lines.append('velocity all create {0} {1} dist gaussian mom yes\n'.format(md.temperature, seed))
    lines.append('velocity all scale {}\n'.format(md.temperature))
    lines.append('fix int all nvt temp {0} {0} {1}\n'.format(md.temperature, md.thermostat_variable))
    lines.append('run {}\n'.format(md.equilibrium_steps))
    lines.append('reset_timestep 0\n')
    # Production: dump element + coordinates every dump_rate steps.
    lines.append('dump aiida all custom {0} {1} element x y z\n'.format(md.dump_rate, trajectory_file))
    lines.append('dump_modify aiida format "%4s %16.10f %16.10f %16.10f"\n')
    lines.append('dump_modify aiida sort id\n')
    lines.append('dump_modify aiida element {}\n'.format(element_names))
    lines.append('run {}\n'.format(md.total_steps))
    return ''.join(lines)
class MdCalculation(JobCalculation):
    """
    A basic plugin for calculating force constants using Lammps.
    Requirement: the node should be able to import phonopy
    """
    def _init_internal_params(self):
        # File names used inside the calculation's working directory, plus the
        # parser plugin that interprets the retrieved output.
        super(MdCalculation, self)._init_internal_params()
        self._INPUT_FILE_NAME = 'input.in'
        self._INPUT_POTENTIAL = 'potential.pot'
        self._INPUT_STRUCTURE = 'input.data'
        self._OUTPUT_TRAJECTORY_FILE_NAME = 'trajectory.lammpstrj'
        self._OUTPUT_FILE_NAME = 'log.lammps'
        self._default_parser = 'lammps.md'
    @classproperty
    def _use_methods(cls):
        """
        Additional use_* methods for the namelists class.

        Declares the input links this calculation accepts: ``parameters``,
        ``potential`` (both ParameterData) and ``structure`` (StructureData).
        """
        retdict = JobCalculation._use_methods
        retdict.update({
            "parameters": {
                'valid_types': ParameterData,
                'additional_parameter': None,
                'linkname': 'parameters',
                'docstring': ("Use a node that specifies the lammps input data "
                              "for the namelists"),
            },
            "potential": {
                'valid_types': ParameterData,
                'additional_parameter': None,
                'linkname': 'potential',
                'docstring': ("Use a node that specifies the lammps potential "
                              "for the namelists"),
            },
            "structure": {
                'valid_types': StructureData,
                'additional_parameter': None,
                'linkname': 'structure',
                'docstring': "Use a node for the structure",
            },
        })
        return retdict
    def _prepare_for_submission(self,tempfolder, inputdict):
        """
        This is the routine to be called when you want to create
        the input files and related stuff with a plugin.
        :param tempfolder: a aiida.common.folders.Folder subclass where
                           the plugin should put all its files.
        :param inputdict: a dictionary with the input nodes, as they would
                be returned by get_inputdata_dict (without the Code!)
        """
        # Validate that all mandatory inputs are present and correctly typed.
        try:
            parameters_data = inputdict.pop(self.get_linkname('parameters'))
        except KeyError:
            raise InputValidationError("No parameters specified for this "
                                       "calculation")
        if not isinstance(parameters_data, ParameterData):
            raise InputValidationError("parameters is not of type "
                                       "ParameterData")
        try:
            potential_data = inputdict.pop(self.get_linkname('potential'))
        except KeyError:
            raise InputValidationError("No potential specified for this "
                                       "calculation")
        if not isinstance(potential_data, ParameterData):
            raise InputValidationError("potential is not of type "
                                       "ParameterData")
        try:
            structure = inputdict.pop(self.get_linkname('structure'))
        except KeyError:
            raise InputValidationError("no structure is specified for this calculation")
        try:
            code = inputdict.pop(self.get_linkname('code'))
        except KeyError:
            raise InputValidationError("no code is specified for this calculation")
        ##############################
        # END OF INITIAL INPUT CHECK #
        ##############################
        # =================== prepare the python input files =====================
        potential_object = LammpsPotential(potential_data, structure, potential_filename=self._INPUT_POTENTIAL)
        structure_txt = generate_LAMMPS_structure(structure)
        input_txt = generate_LAMMPS_input(parameters_data,
                                          potential_object,
                                          structure_file=self._INPUT_STRUCTURE,
                                          trajectory_file=self._OUTPUT_TRAJECTORY_FILE_NAME)
        potential_txt = potential_object.get_potential_file()
        # =========================== dump to file =============================
        input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
        with open(input_filename, 'w') as infile:
            infile.write(input_txt)
        structure_filename = tempfolder.get_abs_path(self._INPUT_STRUCTURE)
        with open(structure_filename, 'w') as infile:
            infile.write(structure_txt)
        # Only write a separate potential file when the potential object
        # produced one (some styles presumably embed everything in the input
        # script — TODO confirm against LammpsPotential).
        if potential_txt is not None:
            potential_filename = tempfolder.get_abs_path(self._INPUT_POTENTIAL)
            with open(potential_filename, 'w') as infile:
                infile.write(potential_txt)
        # ============================ calcinfo ================================
        local_copy_list = []
        remote_copy_list = []
        # additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[])
        calcinfo = CalcInfo()
        calcinfo.uuid = self.uuid
        # Empty command line by default
        calcinfo.local_copy_list = local_copy_list
        calcinfo.remote_copy_list = remote_copy_list
        # Retrieve files: the trajectory dump and the LAMMPS log.
        calcinfo.retrieve_list = []
        calcinfo.retrieve_list.append(self._OUTPUT_TRAJECTORY_FILE_NAME)
        calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
        codeinfo = CodeInfo()
        codeinfo.cmdline_params = ['-in', self._INPUT_FILE_NAME]
        codeinfo.code_uuid = code.uuid
        codeinfo.withmpi = True
        calcinfo.codes_info = [codeinfo]
        return calcinfo
#$MPI -n $NSLOTS $LAMMPS -sf gpu -pk gpu 2 neigh no -in in.md_data
|
|
import pytest
import uuid
import os
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
from awx.main.scheduler.dag_workflow import WorkflowDAG
class Job():
    """Minimal stand-in for a unified job; carries only a status string."""
    def __init__(self, status='successful'):
        # Default mirrors the most common terminal state in these tests.
        self.status = status
class WorkflowNode(object):
    """Lightweight fake of a workflow job node for DAG tests."""
    def __init__(self, id=None, job=None, do_not_run=False, unified_job_template=None):
        # Fall back to a random UUID so every node is uniquely identifiable.
        if id is None:
            id = uuid.uuid4()
        self.id = id
        self.job = job
        self.do_not_run = do_not_run
        self.unified_job_template = unified_job_template
@pytest.fixture
def wf_node_generator(mocker):
    """Return a factory that builds WorkflowNode objects with sequential ids."""
    # Counter is stored on the pytest module, matching the original's
    # cross-call bookkeeping; sequential ids keep assertions stable.
    pytest.count = 0

    def fn(**kwargs):
        node = WorkflowNode(id=pytest.count, unified_job_template=object(), **kwargs)
        pytest.count += 1
        return node
    return fn
@pytest.fixture
def workflow_dag_1(wf_node_generator):
    """Build the 4-node DAG (diagram below) shared by several test classes."""
    g = WorkflowDAG()
    nodes = [wf_node_generator() for i in range(4)]
    # Add nodes with an explicit loop: map() is lazy on Python 3, so the
    # original map(lambda n: g.add_node(n), nodes) would add nothing there.
    for n in nodes:
        g.add_node(n)
    r'''
    0
    /\
    S /  \
    /    \
    1      |
    |      |
    F |      | S
    |      |
    3      |
    \     |
    F \    |
    \/
    2
    '''
    g.add_edge(nodes[0], nodes[1], "success_nodes")
    g.add_edge(nodes[0], nodes[2], "success_nodes")
    g.add_edge(nodes[1], nodes[3], "failure_nodes")
    g.add_edge(nodes[3], nodes[2], "failure_nodes")
    return (g, nodes)
class TestWorkflowDAG():
    @pytest.fixture
    def workflow_dag_root_children(self, wf_node_generator):
        """10 independent root nodes, each with exactly one child."""
        g = WorkflowDAG()
        wf_root_nodes = [wf_node_generator() for i in range(0, 10)]
        wf_leaf_nodes = [wf_node_generator() for i in range(0, 10)]
        # Eager loops instead of map(): map() is lazy on Python 3, and the
        # original ``lambda (i, n)`` tuple-unpacking lambda is a Python-2-only
        # construct that is a SyntaxError on Python 3.
        for n in wf_root_nodes + wf_leaf_nodes:
            g.add_node(n)
        '''
        Pair up a root node with a single child via an edge
        R1 R2 ... Rx
        |  |      |
        |  |      |
        C1 C2     Cx
        '''
        for i, n in enumerate(wf_leaf_nodes):
            g.add_edge(wf_root_nodes[i], n, 'label')
        return (g, wf_root_nodes, wf_leaf_nodes)

    def test_get_root_nodes(self, workflow_dag_root_children):
        (g, wf_root_nodes, ignore) = workflow_dag_root_children
        assert set([n.id for n in wf_root_nodes]) == set([n['node_object'].id for n in g.get_root_nodes()])
class TestDNR():
    """Exercise WorkflowDAG.mark_dnr_nodes() on the shared 4-node DAG."""
    def test_mark_dnr_nodes(self, workflow_dag_1):
        (g, nodes) = workflow_dag_1
        r'''
        S0
        /\
        S /  \
        /    \
        1      |
        |      |
        F |      | S
        |      |
        3      |
        \     |
        F \    |
        \/
        2
        '''
        # Only node 0 has finished; nothing is markable as do-not-run yet.
        nodes[0].job = Job(status='successful')
        dnr_nodes = g.mark_dnr_nodes()
        assert len(dnr_nodes) == 0
        r'''
        S0
        /\
        S /  \
        /    \
        S1      |
        |      |
        F |      | S
        |      |
        DNR 3      |
        \     |
        F \    |
        \/
        2
        '''
        # Node 1 succeeded, so its failure-path child (node 3) becomes DNR.
        nodes[1].job = Job(status='successful')
        dnr_nodes = g.mark_dnr_nodes()
        assert len(dnr_nodes) == 1
        assert dnr_nodes[0] == nodes[3]
class TestIsWorkflowDone():
    """Cover is_workflow_done() / has_workflow_failed() terminal states."""
    @pytest.fixture
    def workflow_dag_2(self, workflow_dag_1):
        (g, nodes) = workflow_dag_1
        r'''
        S0
        /\
        S /  \
        /    \
        S1      |
        |      |
        F |      | S
        |      |
        DNR 3      |
        \     |
        F \    |
        \/
        W2
        '''
        nodes[0].job = Job(status='successful')
        g.mark_dnr_nodes()
        nodes[1].job = Job(status='successful')
        g.mark_dnr_nodes()
        nodes[2].job = Job(status='waiting')
        return (g, nodes)

    @pytest.fixture
    def workflow_dag_failed(self, workflow_dag_1):
        (g, nodes) = workflow_dag_1
        r'''
        S0
        /\
        S /  \
        /    \
        S1      |
        |      |
        F |      | S
        |      |
        DNR 3      |
        \     |
        F \    |
        \/
        F2
        '''
        nodes[0].job = Job(status='successful')
        g.mark_dnr_nodes()
        nodes[1].job = Job(status='successful')
        g.mark_dnr_nodes()
        nodes[2].job = Job(status='failed')
        return (g, nodes)

    @pytest.fixture
    def workflow_dag_canceled(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(1)]
        # Explicit loop: map() is lazy on Python 3 and would add no nodes.
        for n in nodes:
            g.add_node(n)
        r'''
        F0
        '''
        nodes[0].job = Job(status='canceled')
        return (g, nodes)

    @pytest.fixture
    def workflow_dag_failure(self, workflow_dag_canceled):
        (g, nodes) = workflow_dag_canceled
        nodes[0].job.status = 'failed'
        return (g, nodes)

    def test_done(self, workflow_dag_2):
        g = workflow_dag_2[0]
        assert g.is_workflow_done() is False

    def test_workflow_done_and_failed(self, workflow_dag_failed):
        (g, nodes) = workflow_dag_failed
        assert g.is_workflow_done() is True
        assert g.has_workflow_failed() == \
            (True, smart_text(_("No error handle path for workflow job node(s) [({},{})] workflow job node(s)"
                                " missing unified job template and error handle path [].").format(nodes[2].id, nodes[2].job.status)))

    def test_is_workflow_done_no_unified_job_tempalte_end(self, workflow_dag_failed):
        (g, nodes) = workflow_dag_failed
        nodes[2].unified_job_template = None
        assert g.is_workflow_done() is True
        assert g.has_workflow_failed() == \
            (True, smart_text(_("No error handle path for workflow job node(s) [] workflow job node(s) missing"
                                " unified job template and error handle path [{}].").format(nodes[2].id)))

    def test_is_workflow_done_no_unified_job_tempalte_begin(self, workflow_dag_1):
        (g, nodes) = workflow_dag_1
        nodes[0].unified_job_template = None
        g.mark_dnr_nodes()
        assert g.is_workflow_done() is True
        assert g.has_workflow_failed() == \
            (True, smart_text(_("No error handle path for workflow job node(s) [] workflow job node(s) missing"
                                " unified job template and error handle path [{}].").format(nodes[0].id)))

    def test_canceled_should_fail(self, workflow_dag_canceled):
        (g, nodes) = workflow_dag_canceled
        assert g.has_workflow_failed() == \
            (True, smart_text(_("No error handle path for workflow job node(s) [({},{})] workflow job node(s)"
                                " missing unified job template and error handle path [].").format(nodes[0].id, nodes[0].job.status)))

    def test_failure_should_fail(self, workflow_dag_failure):
        (g, nodes) = workflow_dag_failure
        assert g.has_workflow_failed() == \
            (True, smart_text(_("No error handle path for workflow job node(s) [({},{})] workflow job node(s)"
                                " missing unified job template and error handle path [].").format(nodes[0].id, nodes[0].job.status)))
class TestBFSNodesToRun():
    """Verify which children run after a parent job is canceled."""
    @pytest.fixture
    def workflow_dag_canceled(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(4)]
        # Explicit loop: map() is lazy on Python 3 and would add no nodes.
        for n in nodes:
            g.add_node(n)
        r'''
        C0
        / | \
        F /  A|  \ S
        /   |   \
        1    2    3
        '''
        g.add_edge(nodes[0], nodes[1], "failure_nodes")
        g.add_edge(nodes[0], nodes[2], "always_nodes")
        g.add_edge(nodes[0], nodes[3], "success_nodes")
        nodes[0].job = Job(status='canceled')
        return (g, nodes)

    def test_cancel_still_runs_children(self, workflow_dag_canceled):
        (g, nodes) = workflow_dag_canceled
        g.mark_dnr_nodes()
        # Cancel counts as failure: failure- and always-children still run.
        assert set([nodes[1], nodes[2]]) == set(g.bfs_nodes_to_run())
@pytest.mark.skip(reason="Run manually to re-generate doc images")
class TestDocsExample():
    """Manual helper that renders the docs' workflow DAG images step by step."""
    @pytest.fixture
    def complex_dag(self, wf_node_generator):
        g = WorkflowDAG()
        nodes = [wf_node_generator() for i in range(10)]
        # Explicit loop: map() is lazy on Python 3 and would add no nodes.
        for n in nodes:
            g.add_node(n)
        g.add_edge(nodes[0], nodes[1], "failure_nodes")
        g.add_edge(nodes[0], nodes[2], "success_nodes")
        g.add_edge(nodes[0], nodes[3], "always_nodes")
        g.add_edge(nodes[1], nodes[4], "success_nodes")
        g.add_edge(nodes[1], nodes[5], "failure_nodes")
        g.add_edge(nodes[2], nodes[6], "failure_nodes")
        g.add_edge(nodes[3], nodes[6], "success_nodes")
        g.add_edge(nodes[4], nodes[6], "always_nodes")
        g.add_edge(nodes[6], nodes[7], "always_nodes")
        g.add_edge(nodes[6], nodes[8], "success_nodes")
        g.add_edge(nodes[6], nodes[9], "failure_nodes")
        return (g, nodes)

    def test_dnr_step(self, complex_dag):
        (g, nodes) = complex_dag
        base_dir = '/awx_devel'
        g.generate_graphviz_plot(file_name=os.path.join(base_dir, "workflow_step0.gv"))
        nodes[0].job = Job(status='successful')
        g.mark_dnr_nodes()
        g.generate_graphviz_plot(file_name=os.path.join(base_dir, "workflow_step1.gv"))
        nodes[2].job = Job(status='successful')
        nodes[3].job = Job(status='successful')
        g.mark_dnr_nodes()
        g.generate_graphviz_plot(file_name=os.path.join(base_dir, "workflow_step2.gv"))
        nodes[6].job = Job(status='failed')
        g.mark_dnr_nodes()
        g.generate_graphviz_plot(file_name=os.path.join(base_dir, "workflow_step3.gv"))
        nodes[7].job = Job(status='successful')
        nodes[9].job = Job(status='successful')
        g.mark_dnr_nodes()
        g.generate_graphviz_plot(file_name=os.path.join(base_dir, "workflow_step4.gv"))
|
|
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import collections
import netaddr
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron._i18n import _LE, _LW
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_router_base
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import exceptions
from neutron.common import utils as common_utils
LOG = logging.getLogger(__name__)
# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
# Tracks the arp entry cache
# Immutable record queued in DvrLocalRouter._pending_arp_set while the
# internal device is not yet ready; ``operation`` is 'add' or 'delete'.
Arp_entry = collections.namedtuple(
    'Arp_entry', 'ip mac subnet_id operation')
class DvrLocalRouter(dvr_router_base.DvrRouterBase):
    """DVR router as seen by the L3 agent on a compute host.

    Manages the host-local pieces of a distributed router: floating IPs
    plumbed through the FIP namespace, SNAT redirection rules/routes for
    internal ports, and proactive ARP population for router subnets.
    """

    def __init__(self, agent, host, *args, **kwargs):
        super(DvrLocalRouter, self).__init__(agent, host, *args, **kwargs)
        # floating IP address -> ip-rule priority allocated for it
        self.floating_ips_dict = {}
        # Linklocal subnet for router and floating IP namespace link
        self.rtr_fip_subnet = None
        # number of floating IPs hosted on this agent (None until scanned)
        self.dist_fip_count = None
        # FIP namespace object; assigned lazily in process()
        self.fip_ns = None
        # ARP entries cached until the internal device exists
        self._pending_arp_set = set()

    def get_floating_ips(self):
        """Filter Floating IPs to be hosted on this agent."""
        floating_ips = super(DvrLocalRouter, self).get_floating_ips()
        return [i for i in floating_ips if i['host'] == self.host]

    def _handle_fip_nat_rules(self, interface_name):
        """Configures NAT rules for Floating IPs for DVR."""
        self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
        self.iptables_manager.ipv4['nat'].empty_chain('snat')
        # Add back the jump to float-snat
        self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
        # And add the NAT rule back
        rule = ('POSTROUTING', '! -i %(interface_name)s '
                '! -o %(interface_name)s -m conntrack ! '
                '--ctstate DNAT -j ACCEPT' %
                {'interface_name': interface_name})
        self.iptables_manager.ipv4['nat'].add_rule(*rule)
        self.iptables_manager.apply()

    def floating_ip_added_dist(self, fip, fip_cidr):
        """Add floating IP to FIP namespace."""
        floating_ip = fip['floating_ip_address']
        fixed_ip = fip['fixed_ip_address']
        # Reserve a rule priority so traffic from the fixed IP is steered
        # through the FIP routing table.
        rule_pr = self.fip_ns.allocate_rule_priority(floating_ip)
        self.floating_ips_dict[floating_ip] = rule_pr
        fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
        ip_rule = ip_lib.IPRule(namespace=self.ns_name)
        ip_rule.rule.add(ip=fixed_ip,
                         table=dvr_fip_ns.FIP_RT_TBL,
                         priority=rule_pr)
        #Add routing rule in fip namespace
        fip_ns_name = self.fip_ns.get_name()
        if self.rtr_fip_subnet is None:
            self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
                self.router_id)
        rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
        device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
        device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
        interface_name = (
            self.fip_ns.get_ext_device_name(
                self.fip_ns.agent_gateway_port['id']))
        # Advertise the new address on the external device (gratuitous ARP).
        ip_lib.send_ip_addr_adv_notif(fip_ns_name,
                                      interface_name,
                                      floating_ip,
                                      self.agent_conf)
        # update internal structures
        self.dist_fip_count = self.dist_fip_count + 1

    def floating_ip_removed_dist(self, fip_cidr):
        """Remove floating IP from FIP namespace."""
        floating_ip = fip_cidr.split('/')[0]
        rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
        fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
        if self.rtr_fip_subnet is None:
            self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
                self.router_id)
        rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
        fip_ns_name = self.fip_ns.get_name()
        if floating_ip in self.floating_ips_dict:
            rule_pr = self.floating_ips_dict[floating_ip]
            ip_rule = ip_lib.IPRule(namespace=self.ns_name)
            ip_rule.rule.delete(ip=floating_ip,
                                table=dvr_fip_ns.FIP_RT_TBL,
                                priority=rule_pr)
            self.fip_ns.deallocate_rule_priority(floating_ip)
            #TODO(rajeev): Handle else case - exception/log?
        device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
        device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
        # check if this is the last FIP for this router
        self.dist_fip_count = self.dist_fip_count - 1
        if self.dist_fip_count == 0:
            #remove default route entry
            device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
            ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
            device.route.delete_gateway(str(fip_2_rtr.ip),
                                        table=dvr_fip_ns.FIP_RT_TBL)
            # Tear down the router<->FIP link since nothing uses it anymore.
            self.fip_ns.local_subnets.release(self.router_id)
            self.rtr_fip_subnet = None
            ns_ip.del_veth(fip_2_rtr_name)

    def add_floating_ip(self, fip, interface_name, device):
        """Add a floating IP to the device and wire it into the FIP ns."""
        if not self._add_fip_addr_to_device(fip, device):
            return l3_constants.FLOATINGIP_STATUS_ERROR
        # Special Handling for DVR - update FIP namespace
        ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
        self.floating_ip_added_dist(fip, ip_cidr)
        return l3_constants.FLOATINGIP_STATUS_ACTIVE

    def remove_floating_ip(self, device, ip_cidr):
        """Remove a floating IP from the device and the FIP namespace."""
        super(DvrLocalRouter, self).remove_floating_ip(device, ip_cidr)
        self.floating_ip_removed_dist(ip_cidr)

    def _get_internal_port(self, subnet_id):
        """Return internal router port based on subnet_id.

        Returns None implicitly when no port has a fixed IP on the subnet.
        """
        router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
        for port in router_ports:
            fips = port['fixed_ips']
            for f in fips:
                if f['subnet_id'] == subnet_id:
                    return port

    def _cache_arp_entry(self, ip, mac, subnet_id, operation):
        """Cache the arp entries if device not ready."""
        arp_entry_tuple = Arp_entry(ip=ip,
                                    mac=mac,
                                    subnet_id=subnet_id,
                                    operation=operation)
        self._pending_arp_set.add(arp_entry_tuple)

    def _process_arp_cache_for_internal_port(self, subnet_id):
        """Function to process the cached arp entries."""
        arp_remove = set()
        for arp_entry in self._pending_arp_set:
            if subnet_id == arp_entry.subnet_id:
                try:
                    state = self._update_arp_entry(
                        arp_entry.ip, arp_entry.mac,
                        arp_entry.subnet_id, arp_entry.operation)
                except Exception:
                    state = False
                if state:
                    # If the arp update was successful, then
                    # go ahead and add it to the remove set
                    arp_remove.add(arp_entry)
        self._pending_arp_set -= arp_remove

    def _delete_arp_cache_for_internal_port(self, subnet_id):
        """Function to delete the cached arp entries."""
        arp_delete = set()
        for arp_entry in self._pending_arp_set:
            if subnet_id == arp_entry.subnet_id:
                arp_delete.add(arp_entry)
        self._pending_arp_set -= arp_delete

    def _update_arp_entry(self, ip, mac, subnet_id, operation):
        """Add or delete arp entry into router namespace for the subnet."""
        port = self._get_internal_port(subnet_id)
        # update arp entry only if the subnet is attached to the router
        if not port:
            return False
        try:
            # TODO(mrsmith): optimize the calls below for bulk calls
            interface_name = self.get_internal_device_name(port['id'])
            device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
            if device.exists():
                if operation == 'add':
                    device.neigh.add(ip, mac)
                elif operation == 'delete':
                    device.neigh.delete(ip, mac)
                return True
            else:
                if operation == 'add':
                    LOG.warn(_LW("Device %s does not exist so ARP entry "
                                 "cannot be updated, will cache information "
                                 "to be applied later when the device exists"),
                             device)
                    self._cache_arp_entry(ip, mac, subnet_id, operation)
                return False
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("DVR: Failed updating arp entry"))

    def _set_subnet_arp_info(self, subnet_id):
        """Set ARP info retrieved from Plugin for existing ports."""
        # TODO(Carl) Can we eliminate the need to make this RPC while
        # processing a router.
        subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
        for p in subnet_ports:
            if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
                for fixed_ip in p['fixed_ips']:
                    self._update_arp_entry(fixed_ip['ip_address'],
                                           p['mac_address'],
                                           subnet_id,
                                           'add')
        # Flush any entries that were cached while the device was missing.
        self._process_arp_cache_for_internal_port(subnet_id)

    @staticmethod
    def _get_snat_idx(ip_cidr):
        """Generate index for DVR snat rules and route tables.
        The index value has to be 32 bits or less but more than the system
        generated entries i.e. 32768. For IPv4 use the numeric value of the
        cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits.
        Use the freed range to extend smaller values so that they become
        greater than system generated entries.
        """
        net = netaddr.IPNetwork(ip_cidr)
        if net.version == 6:
            if isinstance(ip_cidr, six.text_type):
                ip_cidr = ip_cidr.encode()  # Needed for Python 3.x
            # the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
            snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
            # xor-fold the hash to reserve upper range to extend smaller values
            snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
            if snat_idx < 32768:
                snat_idx = snat_idx + MASK_30
        else:
            snat_idx = net.value
        return snat_idx

    def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr,
                                         snat_idx):
        """Delete the gateway route, ignoring a missing device."""
        try:
            ns_ip_device.route.delete_gateway(gw_ip_addr,
                                              table=snat_idx)
        except exceptions.DeviceNotFoundError:
            pass

    def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add):
        """Adds or removes rules and routes for SNAT redirection."""
        try:
            ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
            ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
            if is_add:
                ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
            for port_fixed_ip in sn_port['fixed_ips']:
                # Iterate and find the gateway IP address matching
                # the IP version
                port_ip_addr = port_fixed_ip['ip_address']
                port_ip_vers = netaddr.IPAddress(port_ip_addr).version
                for gw_fixed_ip in gateway['fixed_ips']:
                    gw_ip_addr = gw_fixed_ip['ip_address']
                    if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers:
                        sn_port_cidr = common_utils.ip_to_cidr(
                            port_ip_addr, port_fixed_ip['prefixlen'])
                        snat_idx = self._get_snat_idx(sn_port_cidr)
                        if is_add:
                            ns_ipd.route.add_gateway(gw_ip_addr,
                                                     table=snat_idx)
                            ns_ipr.rule.add(ip=sn_port_cidr,
                                            table=snat_idx,
                                            priority=snat_idx)
                            ns_ipwrapr.netns.execute(
                                ['sysctl', '-w',
                                 'net.ipv4.conf.%s.send_redirects=0' % sn_int])
                        else:
                            self._delete_gateway_device_if_exists(ns_ipd,
                                                                  gw_ip_addr,
                                                                  snat_idx)
                            ns_ipr.rule.delete(ip=sn_port_cidr,
                                               table=snat_idx,
                                               priority=snat_idx)
        except Exception:
            if is_add:
                exc = _LE('DVR: error adding redirection logic')
            else:
                exc = _LE('DVR: snat remove failed to clear the rule '
                          'and device')
            LOG.exception(exc)

    def _snat_redirect_add(self, gateway, sn_port, sn_int):
        """Adds rules and routes for SNAT redirection."""
        self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=True)

    def _snat_redirect_remove(self, gateway, sn_port, sn_int):
        """Removes rules and routes for SNAT redirection."""
        self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False)

    def internal_network_added(self, port):
        """Handle a new internal port: ARP info plus SNAT redirection."""
        super(DvrLocalRouter, self).internal_network_added(port)
        # NOTE: The following function _set_subnet_arp_info
        # should be called to dynamically populate the arp
        # entries for the dvr services ports into the router
        # namespace. This does not have dependency on the
        # external_gateway port or the agent_mode.
        for subnet in port['subnets']:
            self._set_subnet_arp_info(subnet['id'])
        self._snat_redirect_add_from_port(port)

    def _snat_redirect_add_from_port(self, port):
        """Add SNAT redirection for a port if a gateway and SNAT port exist."""
        ex_gw_port = self.get_ex_gw_port()
        if not ex_gw_port:
            return
        sn_port = self.get_snat_port_for_internal_port(port)
        if not sn_port:
            return
        interface_name = self.get_internal_device_name(port['id'])
        self._snat_redirect_add(sn_port, port, interface_name)

    def _dvr_internal_network_removed(self, port):
        if not self.ex_gw_port:
            return
        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
        if not sn_port:
            return
        # DVR handling code for SNAT
        interface_name = self.get_internal_device_name(port['id'])
        self._snat_redirect_remove(sn_port, port, interface_name)
        # Clean up the cached arp entries related to the port subnet
        for subnet in port['subnets']:
            # Pass the subnet id, not the subnet dict: the ARP cache keys
            # its entries by subnet_id (see _cache_arp_entry), so passing
            # the dict would never match and entries would linger forever.
            self._delete_arp_cache_for_internal_port(subnet['id'])

    def internal_network_removed(self, port):
        """Tear down DVR state for the port, then defer to the base class."""
        self._dvr_internal_network_removed(port)
        super(DvrLocalRouter, self).internal_network_removed(port)

    def get_floating_agent_gw_interface(self, ext_net_id):
        """Filter Floating Agent GW port for the external network."""
        fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
        return next(
            (p for p in fip_ports if p['network_id'] == ext_net_id), None)

    def get_external_device_interface_name(self, ex_gw_port):
        """Return the router->FIP device name if the FIP link exists."""
        fip_int = self.fip_ns.get_int_device_name(self.router_id)
        if ip_lib.device_exists(fip_int, namespace=self.fip_ns.get_name()):
            return self.fip_ns.get_rtr_ext_device_name(self.router_id)

    def external_gateway_added(self, ex_gw_port, interface_name):
        """Set up SNAT redirection and ARP entries for a new gateway."""
        # TODO(Carl) Refactor external_gateway_added/updated/removed to use
        # super class implementation where possible. Looks like preserve_ips,
        # and ns_name are the key differences.
        ip_wrapr = ip_lib.IPWrapper(namespace=self.ns_name)
        ip_wrapr.netns.execute(['sysctl', '-w',
                               'net.ipv4.conf.all.send_redirects=0'])
        for p in self.internal_ports:
            gateway = self.get_snat_port_for_internal_port(p)
            id_name = self.get_internal_device_name(p['id'])
            if gateway:
                self._snat_redirect_add(gateway, p, id_name)
        for port in self.get_snat_interfaces():
            for ip in port['fixed_ips']:
                self._update_arp_entry(ip['ip_address'],
                                       port['mac_address'],
                                       ip['subnet_id'],
                                       'add')

    def external_gateway_updated(self, ex_gw_port, interface_name):
        # Intentionally a no-op override for the DVR local router.
        pass

    def external_gateway_removed(self, ex_gw_port, interface_name):
        """Tear down FIP plumbing and SNAT redirection for the gateway."""
        # TODO(Carl) Should this be calling process_snat_dnat_for_fip?
        self.process_floating_ip_nat_rules()
        if self.fip_ns:
            to_fip_interface_name = (
                self.get_external_device_interface_name(ex_gw_port))
            self.process_floating_ip_addresses(to_fip_interface_name)
        for p in self.internal_ports:
            # NOTE: When removing the gateway port, pass in the snat_port
            # cache along with the current ports.
            gateway = self.get_snat_port_for_internal_port(p, self.snat_ports)
            internal_interface = self.get_internal_device_name(p['id'])
            # Mirror external_gateway_added(): only remove redirection when
            # a matching SNAT port exists rather than passing None along.
            if gateway:
                self._snat_redirect_remove(gateway, p, internal_interface)

    def _handle_router_snat_rules(self, ex_gw_port, interface_name):
        # Intentionally a no-op override for the DVR local router.
        pass

    def _get_address_scope_mark(self):
        """Return address-scope iptables marks for internal ports only."""
        # Prepare address scope iptables rule for internal ports
        internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
        ports_scopemark = self._get_port_devicename_scopemark(
            internal_ports, self.get_internal_device_name)
        # DVR local router don't need to consider external port
        return ports_scopemark

    def process_external(self, agent):
        """Create FIP interfaces (if gatewayed) before base processing."""
        ex_gw_port = self.get_ex_gw_port()
        if ex_gw_port:
            self.create_dvr_fip_interfaces(ex_gw_port)
        super(DvrLocalRouter, self).process_external(agent)

    def create_dvr_fip_interfaces(self, ex_gw_port):
        """Ensure the agent gateway port and router->FIP link exist."""
        floating_ips = self.get_floating_ips()
        fip_agent_port = self.get_floating_agent_gw_interface(
            ex_gw_port['network_id'])
        if fip_agent_port:
            LOG.debug("FloatingIP agent gateway port received from the "
                      "plugin: %s", fip_agent_port)
        is_first = False
        if floating_ips:
            is_first = self.fip_ns.subscribe(ex_gw_port['network_id'])
            if is_first and not fip_agent_port:
                LOG.debug("No FloatingIP agent gateway port possibly due to "
                          "late binding of the private port to the host, "
                          "requesting agent gateway port for 'network-id' :"
                          "%s", ex_gw_port['network_id'])
                # Late binding: ask the plugin for the agent gateway port.
                fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
                    self.agent.context, ex_gw_port['network_id'])
                if not fip_agent_port:
                    LOG.error(_LE("No FloatingIP agent gateway port "
                                  "returned from server for 'network-id': "
                                  "%s"), ex_gw_port['network_id'])
        if is_first and fip_agent_port:
            if 'subnets' not in fip_agent_port:
                LOG.error(_LE('Missing subnet/agent_gateway_port'))
            else:
                self.fip_ns.create_gateway_port(fip_agent_port)
        if (self.fip_ns.agent_gateway_port and
                (self.dist_fip_count == 0)):
            self.fip_ns.create_rtr_2_fip_link(self)
            # kicks the FW Agent to add rules for the IR namespace if
            # configured
            self.agent.process_router_add(self)

    def process(self, agent):
        """Attach the FIP namespace (if gatewayed) and run base processing."""
        ex_gw_port = self.get_ex_gw_port()
        if ex_gw_port:
            self.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
            self.fip_ns.scan_fip_ports(self)
        super(DvrLocalRouter, self).process(agent)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import hashlib
import hmac
import json
import os
import subprocess
import sys
import time
import urllib
import passlib.hash
from keystone import config
from keystone.common import logging
CONF = config.CONF
# Work factor (rounds) used by sha512_crypt in hash_password().
config.register_int('crypt_strength', default=40000)
LOG = logging.getLogger(__name__)
# Zulu-suffixed ISO 8601 timestamp layout used by isotime().
ISO_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Passwords longer than this are truncated before hashing/verification.
MAX_PASSWORD_LENGTH = 4096
def import_class(import_str):
    """Returns a class from a string including module and class.

    :param import_str: dotted path, e.g. ``'package.module.ClassName'``
    :raises: the original ImportError/ValueError/AttributeError after
             logging it at debug level.
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    # ``except (...) as exc`` replaces the Python-2-only comma form, which
    # is a SyntaxError on Python 3; ``as`` is valid from Python 2.6 on.
    except (ImportError, ValueError, AttributeError) as exc:
        LOG.debug('Inner Exception: %s', exc)
        raise
def import_object(import_str, *args, **kw):
    """Returns an object including a module or module and class."""
    try:
        # First assume the string names an importable module.
        __import__(import_str)
        return sys.modules[import_str]
    except ImportError:
        # Otherwise treat it as a class path and instantiate it.
        return import_class(import_str)(*args, **kw)
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
"""
possible_locations = [
config_path,
os.path.join('etc', 'keystone', config_path),
os.path.join('etc', config_path),
os.path.join(config_path),
'/etc/keystone/%s' % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise Exception('Config not found: %s', os.path.abspath(config_path))
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
class SmarterEncoder(json.JSONEncoder):
    """Help for JSON encoding dict-like objects."""
    def default(self, obj):
        # Non-dict objects that expose iteritems() are coerced to a plain
        # dict; everything else defers to the base class (which raises).
        if hasattr(obj, 'iteritems') and not isinstance(obj, dict):
            return dict(obj.iteritems())
        return super(SmarterEncoder, self).default(obj)
class Ec2Signer(object):
    """Hacked up code from boto/connection.py"""
    def __init__(self, secret_key):
        # hmac.new() requires a bytes key.
        secret_key = secret_key.encode()
        self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1)
        # NOTE(review): if hashlib.sha256 were ever falsy, hmac_256 would
        # stay unset and _calc_signature_2 would AttributeError — confirm
        # whether that platform case is still supported.
        if hashlib.sha256:
            self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
    def generate(self, credentials):
        """Generate auth string according to what SignatureVersion is given."""
        if credentials['params']['SignatureVersion'] == '0':
            return self._calc_signature_0(credentials['params'])
        if credentials['params']['SignatureVersion'] == '1':
            return self._calc_signature_1(credentials['params'])
        if credentials['params']['SignatureVersion'] == '2':
            return self._calc_signature_2(credentials['params'],
                                          credentials['verb'],
                                          credentials['host'],
                                          credentials['path'])
        raise Exception('Unknown Signature Version: %s' %
                        credentials['params']['SignatureVersion'])
    @staticmethod
    def _get_utf8_value(value):
        """Get the UTF8-encoded version of a value."""
        # NOTE: Python 2 only -- ``unicode`` does not exist on Python 3.
        if not isinstance(value, str) and not isinstance(value, unicode):
            value = str(value)
        if isinstance(value, unicode):
            return value.encode('utf-8')
        else:
            return value
    def _calc_signature_0(self, params):
        """Generate AWS signature version 0 string."""
        # v0 signs only the concatenation of Action and Timestamp.
        s = params['Action'] + params['Timestamp']
        self.hmac.update(s)
        return base64.b64encode(self.hmac.digest())
    def _calc_signature_1(self, params):
        """Generate AWS signature version 1 string."""
        # v1 feeds every key and value into the HMAC, keys ordered
        # case-insensitively.  NOTE: sort(cmp=...) is Python 2 only.
        keys = params.keys()
        keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
        for key in keys:
            self.hmac.update(key)
            val = self._get_utf8_value(params[key])
            self.hmac.update(val)
        return base64.b64encode(self.hmac.digest())
    def _calc_signature_2(self, params, verb, server_string, path):
        """Generate AWS signature version 2 string."""
        LOG.debug('using _calc_signature_2')
        # v2 signs "<verb>\n<host>\n<path>\n<canonical query string>".
        string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
        if self.hmac_256:
            current_hmac = self.hmac_256
            params['SignatureMethod'] = 'HmacSHA256'
        else:
            current_hmac = self.hmac
            params['SignatureMethod'] = 'HmacSHA1'
        keys = params.keys()
        keys.sort()
        pairs = []
        for key in keys:
            val = self._get_utf8_value(params[key])
            # AWS requires '-', '_' and '~' to remain unescaped in values.
            val = urllib.quote(val, safe='-_~')
            pairs.append(urllib.quote(key, safe='') + '=' + val)
        qs = '&'.join(pairs)
        LOG.debug('query string: %s', qs)
        string_to_sign += qs
        LOG.debug('string_to_sign: %s', string_to_sign)
        current_hmac.update(string_to_sign)
        b64 = base64.b64encode(current_hmac.digest())
        LOG.debug('len(b64)=%d', len(b64))
        LOG.debug('base64 encoded digest: %s', b64)
        return b64
def trunc_password(password):
    """Truncate passwords to the MAX_PASSWORD_LENGTH."""
    # Anything at or under the limit passes through unchanged.
    if len(password) <= MAX_PASSWORD_LENGTH:
        return password
    return password[:MAX_PASSWORD_LENGTH]
def hash_password(password):
    """Hash a password. Hard."""
    password_utf8 = trunc_password(password).encode('utf-8')
    # Input that is already a sha512_crypt hash is passed through as-is.
    if passlib.hash.sha512_crypt.identify(password_utf8):
        return password_utf8
    return passlib.hash.sha512_crypt.encrypt(password_utf8,
                                             rounds=CONF.crypt_strength)
def ldap_hash_password(password):
    """Hash a password. Hard."""
    # Salted SHA1 in the RFC 2307 {SSHA} form understood by LDAP.
    password_utf8 = trunc_password(password).encode('utf-8')
    return passlib.hash.ldap_salted_sha1.encrypt(password_utf8)
def ldap_check_password(password, hashed):
    """Check a plaintext password against an LDAP {SSHA} hash.

    :param password: plaintext password (may be None)
    :param hashed: stored ldap_salted_sha1 hash
    :returns: True if the password matches, else False.
    """
    if password is None:
        return False
    password_utf8 = trunc_password(password).encode('utf-8')
    # NOTE: the previous code also re-encrypted the password with a fresh
    # salt here and discarded the result; verify() alone is sufficient.
    return passlib.hash.ldap_salted_sha1.verify(password_utf8, hashed)
def check_password(password, hashed):
    """Check that a plaintext password matches hashed.
    hashpw returns the salt value concatenated with the actual hash value.
    It extracts the actual salt if this value is then passed as the salt.
    """
    # A missing password can never match.
    if password is None:
        return False
    candidate = trunc_password(password).encode('utf-8')
    return passlib.hash.sha512_crypt.verify(candidate, hashed)
# From python 2.7
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(['ls', '-l', '/dev/null'])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(['/bin/sh', '-c',
... 'ls -l non_existent_file ; exit 0'],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
LOG.debug(' '.join(popenargs[0]))
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get('args')
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
def git(*args):
    """Run git with *args and return its raw stdout."""
    cmd = ['git'] + list(args)
    return check_output(cmd)
def isotime(dt_obj):
    """Format datetime object as ISO compliant string.
    :param dt_obj: datetime.datetime object
    :returns: string representation of datetime object
    """
    # Uses the module-level ISO_TIME_FORMAT (Zulu-suffixed ISO 8601).
    return dt_obj.strftime(ISO_TIME_FORMAT)
def unixtime(dt_obj):
    """Format datetime object as unix timestamp
    :param dt_obj: datetime.datetime object
    :returns: float
    """
    # mktime interprets the UTC time tuple in local time, matching the
    # original behavior exactly.
    return time.mktime(dt_obj.utctimetuple())
def auth_str_equal(provided, known):
    """Constant-time string comparison.

    :params provided: the first string
    :params known: the second string
    :return: True if the strings are equal.

    This function takes two strings and compares them.  It is intended to
    be used when doing a comparison for authentication purposes to help
    guard against timing attacks.  When using the function for this
    purpose, always provide the user-provided password as the first
    argument.  The time this function will take is always a factor of the
    length of this string.
    """
    result = 0
    p_len = len(provided)
    k_len = len(known)
    # The loop always runs exactly p_len iterations so the work done is a
    # function of len(provided) only.  The original guard "i < p_len" was
    # dead code (the loop bound already guarantees it) and is dropped.
    for i in range(p_len):
        a = ord(provided[i])
        # Read 0 past the end of ``known`` instead of short-circuiting.
        b = ord(known[i]) if i < k_len else 0
        result |= a ^ b
    # Bitwise & (not ``and``) avoids a short-circuit on the length check.
    return (p_len == k_len) & (result == 0)
|
|
# <license>
# Copyright (C) 2011 Andrea Interguglielmi, All rights reserved.
# This file is part of the coral repository downloaded from http://code.google.com/p/coral-repo.
#
# Modified for the Hive system by Sjoerd de Vries
# All modifications copyright (C) 2012 Sjoerd de Vries, All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# </license>
from __future__ import print_function, absolute_import
import copy
import weakref
from ..anyQt import QtGui, QtCore
#from ... import coralApp
#from ..._coral import ErrorObject
#from . import nodeView
class ConnectionHook(QtGui.QGraphicsItem):
    """Small plug drawn on an attribute of a node in the node editor.

    A hook is either an "input" or an "output" endpoint; dragging from a
    hook creates a Connection between two hooks.  The keyboard helpers
    (_tabKey, _deleteKey, _plusKey, ...) operate on the hook's currently
    selected connection.
    """
    def __init__(self,
        parentAttributeUi, mode, shape, style,
        parentItem=None, hoverText=None, orderDependent=False
    ):
        if parentItem is None: # parentItem is used by builtinUis.ContainedAttributeUiProxy
            parentItem = parentAttributeUi
        QtGui.QGraphicsItem.__init__(self, parentItem)
        # Weak references avoid reference cycles with the owning UI items.
        self._parentNodeUi = weakref.ref(parentAttributeUi.parentNodeUi())
        self._parentAttributeUi = weakref.ref(parentAttributeUi)
        assert mode in ("input", "output"), mode
        self._mode = mode
        assert shape in ("circle", "square"), shape
        self._shape = shape
        assert style in ("dot", "dashed", "solid"), style
        self._style = style
        self._rect = QtCore.QRectF(0, 0, 12, 12)
        self._color = QtGui.QColor(200, 200, 200)
        self._brush = QtGui.QBrush(self.color())
        self._pen = QtGui.QPen(QtCore.Qt.NoPen)
        # Connection currently being dragged out of this hook, if any.
        self._draggingConnection = None
        self._draggingConnectionEndHook = None
        self._connections = []
        self._hoverText = hoverText
        # When order matters (multi-input hooks), the +/-/digit keys can
        # reorder the selected connection.
        self._orderDependent = orderDependent
        self._mixedColor = False
        self.setFlag(QtGui.QGraphicsItem.ItemSendsScenePositionChanges, True)
        self._pen.setWidthF(1.0)
        self.setAcceptsHoverEvents(True)
        # Index into self._connections, or None when nothing is selected.
        self._selectedConnection = None
    """
    def reparent(self, parentAttributeUi):
        self._parentNodeUi = weakref.ref(parentAttributeUi.parentNodeUi())
        self._parentAttributeUi = weakref.ref(parentAttributeUi)
        pos = self.scenePos()
        self.setParentItem(parentAttributeUi)
        self.setPos(self.mapFromScene(pos))
    """
    def _tabKey(self):
        # Tab: cycle selection forward through this hook's connections.
        self._selectNextConnection()
    def _bspKey(self):
        # Backspace: cycle selection backward.
        self._selectPrevConnection()
    def _deleteKey(self):
        # Delete the selected connection (or the only one, if just one),
        # after giving the canvas callback a chance to veto the removal.
        l = len(self._connections)
        if l == 0: return
        sel = self._selectedConnection
        if sel is None:
            if l == 1:
                nr = 0
            else:
                return
        else:
            nr = self._selectedConnection
        inputConnection = self._connections[nr]
        # NOTE(review): _hqt looks like a weakref to the application
        # canvas (it is called before use everywhere) -- confirm.
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            ok = canvas().gui_removes_connection(inputConnection)
            if not ok:
                return
        self.parentAttributeUi().parentNodeUi().update()
        inputConnection.deleteIt()
        self._selectConnection(None)
    def _plusKey(self):
        # Move the selected connection one slot later (wraps to front).
        if not self._orderDependent: return
        if self._selectedConnection is None: return
        old_pos = self._selectedConnection
        new_pos = old_pos + 1
        if new_pos == len(self._connections): new_pos = 0
        self._rearrange_connection(old_pos, new_pos)
    def _minusKey(self):
        # Move the selected connection one slot earlier (wraps to back).
        if not self._orderDependent: return
        if self._selectedConnection is None: return
        old_pos = self._selectedConnection
        new_pos = old_pos - 1
        if new_pos == -1: new_pos = len(self._connections) - 1
        self._rearrange_connection(old_pos, new_pos)
    def _numKey(self, num):
        # Move the selected connection to slot num - 1, clamped to the end.
        if not self._orderDependent: return
        if self._selectedConnection is None: return
        old_pos = self._selectedConnection
        new_pos = num - 1
        if new_pos >= len(self._connections):
            new_pos = len(self._connections) - 1
        self._rearrange_connection(old_pos, new_pos)
    def _rearrange_connection(self, old_pos, new_pos):
        # Move a connection between positions, asking the canvas first.
        connection = self._connections[old_pos]
        # The canvas callback wants the move expressed relative to the
        # connection currently sitting at the target slot.
        mode = "before"
        new_pos2 = new_pos + 1
        if new_pos2 == len(self._connections):
            new_pos2 = new_pos
            mode = "after"
        other_connection = self._connections[new_pos2]
        ok = True
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            ok = canvas().gui_rearranges_connection(
                connection, other_connection, mode
            )
        if not ok: return
        # Remember the selected connection by identity; its index changes.
        sel_con = self._connections[self._selectedConnection]
        connection = self._connections.pop(old_pos)
        if new_pos == len(self._connections):
            self._connections.append(connection)
        else:
            self._connections.insert(new_pos, connection)
        self._selectedConnection = self._connections.index(sel_con)
        for con in self._connections: con.updatePath()
    def _selectNextConnection(self):
        l = len(self._connections)
        if l <= 1: return
        if self._selectedConnection is None:
            nr = 0
        else:
            nr = self._selectedConnection + 1
            if nr == l: nr = 0
        self._selectConnection(nr)
    def _selectPrevConnection(self):
        l = len(self._connections)
        if l <= 1: return
        if self._selectedConnection is None:
            nr = l - 1
        else:
            nr = self._selectedConnection - 1
            if nr == -1: nr = l - 1
        self._selectConnection(nr)
    def _selectConnection(self, nr):
        # Select connection nr; nr is None to clear the selection (which
        # also re-activates all connections).
        if nr is None:
            if self._selectedConnection is None: return
            for cnr, conn in enumerate(self.connections()):
                conn.setSelected(False)
                conn.setActive(True)
                conn.update()
        else:
            for cnr, conn in enumerate(self.connections()):
                conn.setSelected(cnr == nr)
                conn.update()
        self._selectedConnection = nr
    def isInput(self):
        return self._mode == "input"
    def isOutput(self):
        return self._mode == "output"
    def connections(self):
        return self._connections
    def hoverEnterEvent(self, event):
        # Focus this hook, highlight its connections and show hover text
        # in the status bar (if any was configured).
        self.scene().setFocusedHook(self)
        self._selectedConnection = None
        for conn in self.connections():
            conn.setActive(True)
            conn.update()
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            if self._hoverText is None:
                canvas().clear_statusbar_message()
            else:
                canvas().set_statusbar_message(self._hoverText)
    def hoverLeaveEvent(self, event):
        # Undo everything hoverEnterEvent did.
        self.scene().setFocusedHook(None)
        for conn in self.connections():
            conn.setActive(False)
            conn.update()
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            canvas().clear_statusbar_message()
    def setMixedColor(self, value=True):
        self._mixedColor = value
    def setBorderEnabled(self, value=True):
        # Toggle the outline pen between solid and invisible.
        if value:
            self._pen.setStyle(QtCore.Qt.SolidLine)
        else:
            self._pen.setStyle(QtCore.Qt.NoPen)
    def updateToolTip(self):
        # Mirror the parent attribute's tooltip onto the hook itself.
        tooltip = self._parentAttributeUi().toolTip()
        if tooltip is None: tooltip = ""
        self.setToolTip(tooltip)
    def itemChange(self, change, value):
        # Keep connection endpoints glued to the hook as it moves in the
        # scene (enabled by ItemSendsScenePositionChanges in __init__).
        if change == QtGui.QGraphicsItem.ItemScenePositionHasChanged:
            self.updateWorldPos()
        return value
    def updateWorldPos(self):
        # An input hook anchors connection end points, an output hook
        # anchors start points.
        if self._mode == "input":
            for conn in self._connections:
                conn.updateEndPos()
        else:
            for conn in self._connections:
                conn.updateStartPos()
    def addConnection(self, connection):
        self._selectConnection(None)
        assert connection not in self._connections
        self._connections.append(connection)
        for con in self._connections: con.updatePath()
    def connectionIndex(self, connection):
        # Returns (index, total number of connections).
        return self._connections.index(connection), len(self._connections)
    def removeConnection(self, connection):
        self._selectConnection(None)
        self._connections.remove(connection)
        for con in self._connections: con.updatePath()
    def parentAttributeUi(self):
        # Dereference the weakref stored in __init__.
        return self._parentAttributeUi()
    def parentNodeUi(self):
        # Dereference the weakref stored in __init__.
        return self._parentNodeUi()
    def setColor(self, color):
        # Copy the RGB components; the border is a darker shade of it.
        self._color.setRgb(color.red(), color.green(), color.blue())
        self._brush.setColor(self._color)
        self._pen.setColor(self._color.darker(150))
    def color(self):
        # Return a copy so callers cannot mutate our color in place.
        return QtGui.QColor(self._color)
    def mixedColor(self):
        return self._mixedColor
    def setColorRef(self, color):
        # Unlike setColor, this shares the QColor object with the caller.
        self._color = color
    def colorRef(self):
        return self._color
    def mousePressEvent(self, event):
        from . import Connection
        if self._draggingConnection:
            if event.button() == QtCore.Qt.RightButton:
                # Right-click during a drag: insert an interpoint (bend)
                # at the mouse position instead of ending the drag.
                mousePos = self._draggingConnection.mapFromScene(event.scenePos())
                self._draggingConnection.insertInterpoint(None, mousePos)
                event.accept()
                return
        if event.button() == QtCore.Qt.RightButton:
            event.ignore()
            return
        if self._mode == "output":
            # Start dragging a brand new connection from this output.
            self._selectConnection(None)
            self._draggingConnection = Connection(self)
            self._draggingConnection.setActive(False)
        elif self._mode == "input" and len(self._connections):
            # Dragging from a connected input detaches an existing
            # connection (the selected one, else the last) and continues
            # the drag from its output end.
            con_index = -1
            if self._selectedConnection is not None:
                con_index = self._selectedConnection
            inputConnection = self._connections[con_index]
            canvas = self._parentNodeUi().scene()._hqt
            if canvas:
                ok = canvas().gui_removes_connection(inputConnection)
                if not ok:
                    event.accept()
                    return
            self.parentAttributeUi().parentNodeUi().update()
            outHook = inputConnection.startHook()
            inputConnection.deleteIt()
            self._draggingConnection = Connection(outHook)
            self._draggingConnection.setActive(False)
            mousePos = self._draggingConnection.mapFromScene(event.scenePos())
            self._draggingConnection.endHook().setPos(mousePos)
            self._draggingConnection.updatePath()
    def _handleHover(self, item):
        # Forward a synthetic hover-enter to whatever item the dragged
        # connection end is currently colliding with.
        nodeHovered = None
        collidingItems = item.collidingItems(QtCore.Qt.IntersectsItemBoundingRect)
        if collidingItems:
            nodeHovered = collidingItems[0]
        if nodeHovered:
            nodeHovered.hoverEnterEvent(None)
        #elif nodeView.NodeView._lastHoveredItem:
        #    nodeView.NodeView._lastHoveredItem.hoverLeaveEvent(None)
    def mouseMoveEvent(self, event):
        if self._draggingConnection:
            mousePos = self._draggingConnection.mapFromScene(event.scenePos())
            self.drag(mousePos)
    def drag(self, mousePos):
        # Follow the mouse with the dragging connection's free end and
        # light the connection up when it hovers a legal target hook.
        connectionStartHook = self._draggingConnection.startHook()
        self._draggingConnection.setColor(connectionStartHook.color())
        connectionEndHook = self._draggingConnection.endHook()
        connectionEndHook.setPos(mousePos)
        self._handleHover(self._draggingConnection.endHook())
        endHook = self._draggingConnection.findClosestHook()
        self._draggingConnection.setActive(False)
        if endHook:
            ok = True
            canvas = self._parentNodeUi().scene()._hqt
            if canvas is not None:
                # Let the application decide whether this pairing is legal.
                ok = canvas().gui_asks_connection(self._draggingConnection, endHook)
            if ok:
                self._draggingConnection.setActive(True)
                # Snap the free end onto the centre of the candidate hook.
                hookSize = endHook.boundingRect().bottomRight() / 2.0
                hookPos = self._draggingConnection.mapFromItem(endHook, hookSize.x(), hookSize.y())
                connectionEndHook.setPos(hookPos)
            else:
                self._draggingConnection.setActive(False)
        else:
            #TODO
            if QtGui.QToolTip.isVisible():
                QtGui.QToolTip.hideText()
        self._draggingConnection.updateEndPos()
    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            if self._draggingConnection:
                endHook = self._draggingConnection.findClosestHook()
                if endHook:
                    # Swap the temporary dummy end hook for the real one.
                    dummyEndHook = self._draggingConnection.endHook()
                    self._draggingConnection.setEndHook(endHook)
                    if dummyEndHook is not None and dummyEndHook is not endHook:
                        self.scene().removeItem(dummyEndHook)
                    hookSize = endHook.boundingRect().bottomRight() / 2.0
                    # NOTE(review): hookPos is computed but never used here.
                    hookPos = self._draggingConnection.mapFromItem(endHook, hookSize.x(), hookSize.y())
                    self._draggingConnection.setActive(True)
                    self._draggingConnection._isTempConnection = False
                    draggingConnection = self._draggingConnection
                    self._draggingConnection = None
                    canvas = self._parentNodeUi().scene()._hqt
                    # Ctrl-release forces the connection even when the
                    # canvas callback refuses it.
                    force = False
                    if event.modifiers() == QtCore.Qt.ControlModifier:
                        force = True
                    if canvas:
                        ok = canvas().gui_adds_connection(draggingConnection, force)
                        if not ok:
                            if not force:
                                draggingConnection.deleteIt()
                    self.parentAttributeUi().parentNodeUi().update()
                else:
                    self._cancelDraggingConnection()
    def _cancelDraggingConnection(self):
        # Abort an in-progress drag and discard the temporary connection.
        # NOTE(review): startHook is fetched but unused -- looks vestigial.
        startHook = self._draggingConnection.startHook()
        self._draggingConnection.deleteIt()
        self._draggingConnection = None
        if self._draggingConnectionEndHook:
            self._draggingConnectionEndHook = None
        self.parentAttributeUi().parentNodeUi().update()
    def boundingRect(self):
        return self._rect
    def paint(self, painter, option, widget):
        painter.setBrush(self._brush)
        painter.setPen(self._pen)
        if self._shape == "circle":
            painter.drawEllipse(self._rect)
        elif self._shape == "square":
            # The "square" shape is drawn rotated 45 degrees (a diamond)
            # and slightly shrunk to stay inside the bounding rect.
            painter.save()
            c = self._rect.center()
            painter.translate(c)
            painter.rotate(45)
            painter.scale(0.8, 0.8)
            painter.drawRect(self._rect.translated(-c))
            painter.restore()
        else:
            raise ValueError(self._shape)
        if self._mixedColor:
            # Darken the lower half-chord when the mixed flag is set.
            painter.setBrush(painter.brush().color().darker(130))
            painter.drawChord(self._rect, 1 * 16, 180 * 16)
    def deleteIt(self):
        # Iterate over a copy: deleteIt() mutates self._connections.
        conns = list(self.connections())
        for conn in conns:
            conn.deleteIt()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds desktop browsers that can be controlled by telemetry."""
import logging
import os
import sys
import dependency_manager # pylint: disable=import-error
from telemetry.core import exceptions
from telemetry.core import platform as platform_module
from telemetry.internal.backends.chrome import desktop_browser_backend
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import desktop_device
from telemetry.internal.util import binary_manager
# This is a workaround for https://goo.gl/1tGNgd
from telemetry.internal.util import path as path_module
class PossibleDesktopBrowser(possible_browser.PossibleBrowser):
  """A desktop browser that can be controlled."""
  def __init__(self, browser_type, finder_options, executable, flash_path,
               is_content_shell, browser_directory, is_local_build=False):
    target_os = sys.platform.lower()
    super(PossibleDesktopBrowser, self).__init__(
        browser_type, target_os, not is_content_shell)
    # Catch typos early: the type must be one this finder knows about.
    assert browser_type in FindAllBrowserTypes(finder_options), (
        'Please add %s to desktop_browser_finder.FindAllBrowserTypes' %
        browser_type)
    self._local_executable = executable
    self._flash_path = flash_path
    self._is_content_shell = is_content_shell
    self._browser_directory = browser_directory
    self.is_local_build = is_local_build
  def __repr__(self):
    return 'PossibleDesktopBrowser(type=%s, executable=%s, flash=%s)' % (
        self.browser_type, self._local_executable, self._flash_path)
  def _InitPlatformIfNeeded(self):
    # Lazily bind the host platform; Create() may be called many times.
    if self._platform:
      return
    self._platform = platform_module.GetHostPlatform()
    # pylint: disable=protected-access
    self._platform_backend = self._platform._platform_backend
  def Create(self, finder_options):
    """Build and return a Browser driven by a desktop browser backend."""
    # A stale Flash path is downgraded to a warning, not an error.
    if self._flash_path and not os.path.exists(self._flash_path):
      logging.warning(
          'Could not find Flash at %s. Continuing without Flash.\n'
          'To run with Flash, check it out via http://go/read-src-internal',
          self._flash_path)
      self._flash_path = None
    self._InitPlatformIfNeeded()
    browser_backend = desktop_browser_backend.DesktopBrowserBackend(
        self._platform_backend,
        finder_options.browser_options, self._local_executable,
        self._flash_path, self._is_content_shell, self._browser_directory,
        output_profile_path=finder_options.output_profile_path,
        extensions_to_load=finder_options.extensions_to_load)
    return browser.Browser(
        browser_backend, self._platform_backend, self._credentials_path)
  def SupportsOptions(self, finder_options):
    # Content shell cannot load extensions.
    if (len(finder_options.extensions_to_load) != 0) and self._is_content_shell:
      return False
    return True
  def UpdateExecutableIfNeeded(self):
    # Local executables are used as-is; nothing to download or refresh.
    pass
  def last_modification_time(self):
    # mtime of the executable, or -1 when it does not exist (sorts first).
    if os.path.exists(self._local_executable):
      return os.path.getmtime(self._local_executable)
    return -1
def SelectDefaultBrowser(possible_browsers):
  """Return the most recently built local browser, or None if there is none."""
  local_builds = sorted(
      [b for b in possible_browsers if b.is_local_build],
      key=lambda b: b.last_modification_time())
  if not local_builds:
    return None
  return local_builds[-1]
def CanFindAvailableBrowsers():
  # Desktop browsers cannot be found when the host itself is a CrOS device.
  # (Rewritten from "not x == y" to the idiomatic "x != y".)
  return platform_module.GetHostPlatform().GetOSName() != 'chromeos'
def CanPossiblyHandlePath(target_path):
  """Return True if target_path's name plausibly denotes a desktop browser.

  Mac/Linux executables carry no extension; Windows ones must end in .exe.
  """
  _, extension = os.path.splitext(target_path.lower())
  if sys.platform.startswith('win'):
    return extension == '.exe'
  if sys.platform == 'darwin' or sys.platform.startswith('linux'):
    return not extension
  return False
def FindAllBrowserTypes(_):
  """List every browser type this finder can produce (argument unused)."""
  local_types = [
      'exact', 'reference', 'release', 'release_x64', 'debug', 'debug_x64',
      'default']
  channel_types = ['stable', 'beta', 'dev', 'canary']
  content_shell_types = [
      'content-shell-debug', 'content-shell-debug_x64',
      'content-shell-release', 'content-shell-release_x64',
      'content-shell-default']
  return local_types + channel_types + content_shell_types + ['system']
def FindAllAvailableBrowsers(finder_options, device):
  """Finds all the desktop browsers available on this machine."""
  if not isinstance(device, desktop_device.DesktopDevice):
    return []
  browsers = []
  if not CanFindAvailableBrowsers():
    return []
  # On Linux a browser needs an X display unless it runs on Ozone (checked
  # at the bottom of this function).
  has_x11_display = True
  if (sys.platform.startswith('linux') and
      os.getenv('DISPLAY') is None):
    has_x11_display = False
  os_name = platform_module.GetHostPlatform().GetOSName()
  arch_name = platform_module.GetHostPlatform().GetArchName()
  try:
    flash_path = binary_manager.LocalPath('flash', arch_name, os_name)
  except dependency_manager.NoPathFoundError:
    flash_path = None
    logging.warning(
        'Chrome build location is not specified. Browser will be run without '
        'Flash.')
  chromium_app_names = []
  if sys.platform == 'darwin':
    chromium_app_names.append('Chromium.app/Contents/MacOS/Chromium')
    chromium_app_names.append('Google Chrome.app/Contents/MacOS/Google Chrome')
    content_shell_app_name = 'Content Shell.app/Contents/MacOS/Content Shell'
  elif sys.platform.startswith('linux'):
    chromium_app_names.append('chrome')
    content_shell_app_name = 'content_shell'
  elif sys.platform.startswith('win'):
    chromium_app_names.append('chrome.exe')
    content_shell_app_name = 'content_shell.exe'
  else:
    raise Exception('Platform not recognized')
  # Add the explicit browser executable if given and we can handle it.
  if (finder_options.browser_executable and
      CanPossiblyHandlePath(finder_options.browser_executable)):
    is_content_shell = finder_options.browser_executable.endswith(
        content_shell_app_name)
    is_chrome_or_chromium = len([x for x in chromium_app_names if
                                 finder_options.browser_executable.endswith(x)
                                 ]) != 0
    # It is okay if the executable name doesn't match any of known chrome
    # browser executables, since it may be of a different browser.
    if is_chrome_or_chromium or is_content_shell:
      normalized_executable = os.path.expanduser(
          finder_options.browser_executable)
      if path_module.IsExecutable(normalized_executable):
        browser_directory = os.path.dirname(finder_options.browser_executable)
        browsers.append(PossibleDesktopBrowser(
            'exact', finder_options, normalized_executable, flash_path,
            is_content_shell,
            browser_directory))
      else:
        raise exceptions.PathMissingError(
            '%s specified by --browser-executable does not exist or is not '
            'executable' %
            normalized_executable)
  def AddIfFound(browser_type, build_path, app_name, content_shell):
    # Register build_path/app_name as a local build when it is executable.
    app = os.path.join(build_path, app_name)
    if path_module.IsExecutable(app):
      browsers.append(PossibleDesktopBrowser(
          browser_type, finder_options, app, flash_path,
          content_shell, build_path, is_local_build=True))
      return True
    return False
  # Add local builds
  for build_path in path_module.GetBuildDirectories(finder_options.chrome_root):
    # TODO(agrieve): Extract browser_type from args.gn's is_debug.
    browser_type = os.path.basename(build_path).lower()
    for chromium_app_name in chromium_app_names:
      AddIfFound(browser_type, build_path, chromium_app_name, False)
    AddIfFound('content-shell-' + browser_type, build_path,
               content_shell_app_name, True)
  reference_build = None
  if finder_options.browser_type == 'reference':
    # Reference builds are only available in a Chromium checkout. We should not
    # raise an error just because they don't exist.
    os_name = platform_module.GetHostPlatform().GetOSName()
    arch_name = platform_module.GetHostPlatform().GetArchName()
    reference_build = binary_manager.FetchPath(
        'reference_build', arch_name, os_name)
  # Mac-specific options.
  if sys.platform == 'darwin':
    mac_canary_root = '/Applications/Google Chrome Canary.app/'
    mac_canary = mac_canary_root + 'Contents/MacOS/Google Chrome Canary'
    mac_system_root = '/Applications/Google Chrome.app'
    mac_system = mac_system_root + '/Contents/MacOS/Google Chrome'
    if path_module.IsExecutable(mac_canary):
      browsers.append(PossibleDesktopBrowser('canary', finder_options,
                                             mac_canary, None, False,
                                             mac_canary_root))
    if path_module.IsExecutable(mac_system):
      browsers.append(PossibleDesktopBrowser('system', finder_options,
                                             mac_system, None, False,
                                             mac_system_root))
    if reference_build and path_module.IsExecutable(reference_build):
      reference_root = os.path.dirname(os.path.dirname(os.path.dirname(
          reference_build)))
      browsers.append(PossibleDesktopBrowser('reference', finder_options,
                                             reference_build, None, False,
                                             reference_root))
  # Linux specific options.
  if sys.platform.startswith('linux'):
    versions = {
        'system': os.path.split(os.path.realpath('/usr/bin/google-chrome'))[0],
        'stable': '/opt/google/chrome',
        'beta': '/opt/google/chrome-beta',
        'dev': '/opt/google/chrome-unstable'
    }
    for version, root in versions.iteritems():
      browser_path = os.path.join(root, 'chrome')
      if path_module.IsExecutable(browser_path):
        browsers.append(PossibleDesktopBrowser(version, finder_options,
                                               browser_path, None, False,
                                               root))
    if reference_build and path_module.IsExecutable(reference_build):
      reference_root = os.path.dirname(reference_build)
      browsers.append(PossibleDesktopBrowser('reference', finder_options,
                                             reference_build, None, False,
                                             reference_root))
  # Win32-specific options.
  if sys.platform.startswith('win'):
    app_paths = [
        ('system', os.path.join('Google', 'Chrome', 'Application')),
        ('canary', os.path.join('Google', 'Chrome SxS', 'Application')),
    ]
    if reference_build:
      app_paths.append(
          ('reference', os.path.dirname(reference_build)))
    for browser_name, app_path in app_paths:
      for chromium_app_name in chromium_app_names:
        # Use a fresh variable here: the original code reassigned the
        # app_path loop variable, corrupting the join base for any later
        # iteration of either loop.
        candidate_path = os.path.join(app_path, chromium_app_name)
        candidate_path = path_module.FindInstalledWindowsApplication(
            candidate_path)
        if candidate_path:
          browsers.append(PossibleDesktopBrowser(
              browser_name, finder_options, candidate_path,
              None, False, os.path.dirname(candidate_path)))
  has_ozone_platform = False
  for arg in finder_options.browser_options.extra_browser_args:
    if "--ozone-platform" in arg:
      has_ozone_platform = True
  if len(browsers) and not has_x11_display and not has_ozone_platform:
    logging.warning(
        'Found (%s), but you do not have a DISPLAY environment set.' %
        ','.join([b.browser_type for b in browsers]))
    return []
  return browsers
|
|
import os, psycopg2, re, time
import logging
import shutil
from urlparse import urlparse
logger = logging.getLogger(__name__)
class Postgresql:
    """Manage one PostgreSQL instance for leader-election style HA.

    Shells out to initdb / pg_ctl / pg_basebackup via os.system and issues
    SQL through psycopg2.  Python 2 code (octal literal, iteritems).
    """
    def __init__(self, config):
        self.name = config["name"]
        self.params = config.get('parameters', {})
        self.listen_addrs = self.params.get('listen_addresses', 'localhost') \
            .split(',')
        # Client connections target the first configured listen address.
        self.host = self.listen_addrs[0]
        self.port = self.params.get('port', 5432)
        self.data_dir = config["data_dir"]
        self.replication = config["replication"]
        self.config = config
        self.cursor_holder = None
        # Connection string advertised to the other cluster members.
        self.connection_string = "postgres://%s:%s@%s:%s/postgres" % (self.replication["username"], self.replication["password"], self.host, self.port)
        self.conn = None
    def cursor(self):
        # Lazily open one autocommit connection; reused until disconnect().
        if not self.cursor_holder:
            self.conn = psycopg2.connect("postgres://%s:%s/postgres" % (self.host, self.port))
            self.conn.autocommit = True
            self.cursor_holder = self.conn.cursor()
        return self.cursor_holder
    def disconnect(self):
        # Best effort: log but never raise on close failures.
        try:
            self.conn.close()
        except Exception as e:
            logger.error("Error disconnecting: %s" % e)
    def query(self, sql):
        """Execute sql, reconnecting and retrying on OperationalError.

        Retries every 5 seconds and re-raises after 5 failed attempts.
        Returns the live cursor so callers can fetch results.
        """
        # NOTE(review): despite its name this counts attempts made so far.
        max_attempts = 0
        while True:
            try:
                self.cursor().execute(sql)
                break
            except psycopg2.OperationalError as e:
                # Drop the broken connection so cursor() reconnects.
                if self.conn:
                    self.disconnect()
                self.cursor_holder = None
                if max_attempts > 4:
                    raise e
                max_attempts += 1
                time.sleep(5)
        return self.cursor()
    def data_directory_empty(self):
        # A missing directory counts as empty.
        return not os.path.exists(self.data_dir) or os.listdir(self.data_dir) == []
    def drop_cluster(self):
        # Wipe the data directory; 0700 = owner-only (Python 2 octal).
        shutil.rmtree(self.data_dir)
        os.mkdir(self.data_dir, 0700)
    def initialize(self):
        """initdb a fresh cluster and create the replication user."""
        logger.info("Initializing cluster in %s" % self.data_dir)
        if os.system("initdb -D %s" % self.data_dir) == 0:
            # start Postgres without options to setup replication user indepedent of other system settings
            self.write_pg_hba()
            os.system("pg_ctl start -w -D %s -o '%s'" % (self.data_dir, self.server_options()))
            self.create_replication_user()
            os.system("pg_ctl stop -w -m fast -D %s" % self.data_dir)
            return True
        else:
            logger.error("Could not initialize cluster in %s" % self.data_dir)
            return False
    def sync_from_leader(self, leader):
        """Take a base backup from the leader; returns True on success."""
        leader = urlparse(leader["address"])
        logger.info("Syncing base backup from leader %s" % leader.hostname)
        # Credentials go through a chmod-600 pgpass file so the password
        # does not appear on the pg_basebackup command line.
        f = open("./pgpass", "w")
        f.write("%(hostname)s:%(port)s:*:%(username)s:%(password)s\n" %
                {"hostname": leader.hostname, "port": leader.port, "username": leader.username, "password": leader.password})
        f.close()
        os.system("chmod 600 pgpass")
        return os.system("PGPASSFILE=pgpass pg_basebackup -R -D %(data_dir)s --host=%(host)s --port=%(port)s -U %(username)s" %
                {"data_dir": self.data_dir, "host": leader.hostname, "port": leader.port, "username": leader.username}) == 0
    def is_leader(self):
        # Leader == not in recovery.
        return not self.query("SELECT pg_is_in_recovery();").fetchone()[0]
    def is_running(self):
        return os.system("pg_ctl status -D %s > /dev/null" % self.data_dir) == 0
    def start(self):
        """Start the server, clearing any stale postmaster.pid first."""
        if self.is_running():
            logger.error("Cannot start PostgreSQL because one is already running.")
            return False
        pid_path = "%s/postmaster.pid" % self.data_dir
        if os.path.exists(pid_path):
            os.remove(pid_path)
            logger.info("Removed %s" % pid_path)
        return os.system("pg_ctl start -w -D %s -o '%s'" % (self.data_dir, self.server_options())) == 0
    def stop(self, mode='fast'):
        # NOTE(review): "!= 0" makes this return True when pg_ctl FAILS,
        # the opposite convention of start()/reload()/restart() -- confirm
        # whether this inversion is intended (demote() depends on it).
        return os.system("pg_ctl stop -w -D %s -m %s -w" % (self.data_dir, mode)) != 0
    def reload(self):
        return os.system("pg_ctl reload -w -D %s" % self.data_dir) == 0
    def restart(self):
        return os.system("pg_ctl restart -w -D %s -m fast" % self.data_dir) == 0
    def server_options(self):
        # Render config["parameters"] as pg_ctl -o command-line settings.
        options = "-c listen_addresses=%s -c port=%s" % (self.host, self.port)
        for setting, value in self.config["parameters"].iteritems():
            options += " -c \"%s=%s\"" % (setting, value)
        return options
    def is_healthy(self):
        if not self.is_running():
            logger.warning("Postgresql is not running.")
            return False
        # NOTE(review): both branches below return True, so the leader
        # check is currently redundant.
        if self.is_leader():
            return True
        return True
    def is_healthiest_node(self, state_store):
        """Return True if no reachable member is further along in replay."""
        # this should only happen on initialization
        if state_store.last_leader_operation() is None:
            return True
        # Too far behind the leader: not a failover candidate.
        if (state_store.last_leader_operation() - self.xlog_position()) > self.config["maximum_lag_on_failover"]:
            return False
        for member in state_store.members():
            if member["hostname"] == self.name:
                continue
            try:
                member_conn = psycopg2.connect(member["address"])
                member_conn.autocommit = True
                member_cursor = member_conn.cursor()
                # Negative diff => that member has replayed more WAL.
                member_cursor.execute("SELECT %s - (pg_last_xlog_replay_location() - '0/000000'::pg_lsn) AS bytes;" % self.xlog_position())
                xlog_diff = member_cursor.fetchone()[0]
                logger.info([self.name, member["hostname"], xlog_diff])
                if xlog_diff < 0:
                    member_cursor.close()
                    return False
                member_cursor.close()
            except psycopg2.OperationalError:
                # Unreachable members cannot outrank us.
                continue
        return True
    def replication_slot_name(self):
        # Slot names may only contain lower-case letters, digits and "_".
        member = os.environ.get("MEMBER")
        (member, _) = re.subn(r'[^a-z0-9]+', r'_', member)
        return member
    def write_pg_hba(self):
        """Append replication access plus any configured hba entries."""
        pg_hba = self.params.get('hba_file',
                '{}/pg_hba.conf'.format(self.data_dir))
        with open(pg_hba, "a") as f:
            f.write("host replication %(username)s %(network)s md5" %
                {"username": self.replication["username"],
                "network": self.replication["network"]})
            for entry in self.config.get('hba_entries') or []:
                f.write('\n')
                f.write(entry)
    def write_recovery_conf(self, leader_hash):
        """Write recovery.conf; a None leader_hash means no primary yet."""
        f = open("%s/recovery.conf" % self.data_dir, "w")
        f.write("""
standby_mode = 'on'
primary_slot_name = '%(recovery_slot)s'
recovery_target_timeline = 'latest'
""" % {"recovery_slot": self.name})
        if leader_hash is not None:
            leader = urlparse(leader_hash["address"])
            f.write("""
primary_conninfo = 'user=%(user)s password=%(password)s host=%(hostname)s port=%(port)s sslmode=prefer sslcompression=1'
""" % {"user": leader.username, "password": leader.password, "hostname": leader.hostname, "port": leader.port})
        if "recovery_conf" in self.config:
            for name, value in self.config["recovery_conf"].iteritems():
                f.write("%s = '%s'\n" % (name, value))
        f.close()
    def follow_the_leader(self, leader_hash):
        """Point recovery.conf at the leader, restarting only on change."""
        leader = urlparse(leader_hash["address"])
        # grep the current recovery.conf to avoid a needless restart.
        if os.system("grep 'host=%(hostname)s port=%(port)s' %(data_dir)s/recovery.conf > /dev/null" % {"hostname": leader.hostname, "port": leader.port, "data_dir": self.data_dir}) != 0:
            self.write_recovery_conf(leader_hash)
            self.restart()
        return True
    def follow_no_leader(self):
        # Ensure a leaderless recovery.conf (one without primary_conninfo).
        if not os.path.exists("%s/recovery.conf" % self.data_dir) or os.system("grep primary_conninfo %(data_dir)s/recovery.conf &> /dev/null" % {"data_dir": self.data_dir}) == 0:
            self.write_recovery_conf(None)
            if self.is_running():
                self.restart()
        return True
    def promote(self):
        return os.system("pg_ctl promote -w -D %s" % self.data_dir) == 0
    def demote(self, state_store, leader):
        """Step down: stop, wipe, resync from leader, restart as standby."""
        logger.info("Stopping server")
        if not self.stop('fast'):
            self.stop('immediate')
        logger.info("Dropping cluster")
        self.drop_cluster()
        logger.info("Syncing from leader")
        self.sync_from_leader(leader)
        self.write_recovery_conf(leader)
        # Make sure we are present in members list so that
        # a proper replication slot is created.
        state_store.touch_member(self.name, self.connection_string)
        time.sleep(5)
        self.start()
    def create_replication_user(self):
        self.query("CREATE USER \"%s\" WITH REPLICATION ENCRYPTED PASSWORD '%s';" % (self.replication["username"], self.replication["password"]))
    def xlog_position(self):
        # Bytes of WAL replayed so far (0 when nothing replayed yet).
        return self.query("SELECT pg_last_xlog_replay_location() - '0/0000000'::pg_lsn;").fetchone()[0] or 0
    def last_operation(self):
        # Bytes of WAL written; meaningful on the leader.
        return self.query("SELECT pg_current_xlog_location() - '0/00000'::pg_lsn;").fetchone()[0]
|
|
import collections
from dateutil.parser import parse
from datetime import datetime, timedelta
import json
from urlparse import urlparse
from waffle.models import Switch
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.contrib.auth.tokens import default_token_generator
from django.forms.models import model_to_dict
from django.utils.http import urlsafe_base64_encode
from mock import Mock, patch
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.abuse.models import AbuseReport
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser, Category
from olympia.amo.helpers import urlparams
from olympia.amo.pyquery_wrapper import PyQuery as pq
from olympia.amo.urlresolvers import reverse
from olympia.bandwagon.models import Collection, CollectionWatcher
from olympia.devhub.models import ActivityLog
from olympia.reviews.models import Review
from olympia.users import notifications as email
from olympia.users.models import (
BlacklistedPassword, UserProfile, UserNotification)
from olympia.users.utils import EmailResetCode, UnsubscribeCode
from olympia.users.views import tshirt_eligible
def migrate_path(next_path=None):
    """Build the users.migrate URL, carrying next_path in the ``to`` param."""
    migrate_url = reverse('users.migrate')
    return urlparams(migrate_url, to=next_path)
def fake_request():
    """Return a Mock standing in for a Django request, with just enough
    attributes (LANG, GET/META, host, scheme) for Persona login code."""
    mock_request = Mock()
    mock_request.LANG = 'foo'
    mock_request.GET = mock_request.META = {}
    # Fake out host/scheme for Persona login.
    mock_request.get_host.return_value = urlparse(settings.SITE_URL).netloc
    mock_request.is_secure.return_value = False
    return mock_request
def check_sidebar_links(self, expected):
    """Shared test helper (called with a TestCase as ``self``): assert the
    secondary-nav sidebar contains ``expected`` links and that the link for
    the current page carries the ``selected`` class."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    sidebar_links = pq(response.content)('#secondary-nav ul a')
    amo.tests.check_links(expected, sidebar_links)
    assert sidebar_links.filter('.selected').attr('href') == self.url
class TestTShirtOrder(TestCase):
    """Eligibility rules for the developer t-shirt offer (tshirt_eligible)."""
    fixtures = ['base/users', 'base/addon_3615']
    def test_normal_user(self):
        """A user who authors no add-ons does not qualify."""
        user = UserProfile.objects.get(email='regular@mozilla.com')
        assert not tshirt_eligible(user)
    def test_listed_dev(self):
        """The author of a listed add-on qualifies."""
        addon = Addon.objects.get(pk=3615)
        user = addon.authors.get()
        assert tshirt_eligible(user)
    def test_unlisted_dev(self):
        """Unlisted add-on authors qualify only once a file is signed."""
        addon = Addon.objects.get(pk=3615)
        user = addon.authors.get()
        addon.update(is_listed=False)
        assert not tshirt_eligible(user)
        addon.versions.get().files.get().update(is_signed=True)
        assert tshirt_eligible(user)
    def test_persona_dev(self):
        """Persona authors qualify with 10000 average daily users, not with 1."""
        addon = Addon.objects.get(pk=3615)
        user = addon.authors.get()
        addon.update(type=amo.ADDON_PERSONA,
                     average_daily_users=1)
        assert not tshirt_eligible(user)
        addon.update(average_daily_users=10000)
        assert tshirt_eligible(user)
class UserViewBase(TestCase):
    """Shared base for user-view tests: primes the test client with an initial
    request and loads the fixture user with id 4043307 as ``self.user``."""
    fixtures = ['users/test_backends']
    def setUp(self):
        super(UserViewBase, self).setUp()
        self.client = amo.tests.TestClient()
        # Warm the client/session with one request before each test.
        self.client.get('/')
        self.user = UserProfile.objects.get(id='4043307')
    def get_profile(self):
        """Re-fetch the user from the database to observe persisted changes."""
        return UserProfile.objects.get(id=self.user.id)
class TestAjax(UserViewBase):
    """Tests for the users.ajax email-lookup endpoint."""
    def setUp(self):
        super(TestAjax, self).setUp()
        self.client.login(username='jbalogh@mozilla.com', password='password')
    def test_ajax_404(self):
        """A request without a query 404s."""
        r = self.client.get(reverse('users.ajax'), follow=True)
        assert r.status_code == 404
    def test_ajax_success(self):
        """A known email returns the matching user's id and name."""
        r = self.client.get(reverse('users.ajax'), {'q': 'fligtar@gmail.com'},
                            follow=True)
        data = json.loads(r.content)
        assert data == {
            'status': 1, 'message': '', 'id': 9945,
            'name': u'Justin Scott \u0627\u0644\u062a\u0637\u0628'}
    def test_ajax_xss(self):
        """A <script> tag in the display name must be HTML-escaped in the
        response: the raw tag must be absent, its escaped form present."""
        self.user.display_name = '<script>alert("xss")</script>'
        self.user.save()
        assert '<script>' in self.user.display_name, (
            'Expected <script> to be in display name')
        r = self.client.get(reverse('users.ajax'),
                            {'q': self.user.email, 'dev': 0})
        assert '<script>' not in r.content
        # Fixed: this previously asserted '<script>' in r.content, which
        # contradicts the line above (the pair could never both pass). The
        # intent is that the tag appears only in its escaped form.
        assert '&lt;script&gt;' in r.content
    def test_ajax_failure_incorrect_email(self):
        """An unknown email yields status 0 with an explanatory message."""
        r = self.client.get(reverse('users.ajax'), {'q': 'incorrect'},
                            follow=True)
        data = json.loads(r.content)
        assert data == (
            {'status': 0,
             'message': 'A user with that email address does not exist.'})
    def test_ajax_failure_no_email(self):
        """An empty query yields status 0 with a 'required' message."""
        r = self.client.get(reverse('users.ajax'), {'q': ''}, follow=True)
        data = json.loads(r.content)
        assert data == (
            {'status': 0,
             'message': 'An email address is required.'})
    def test_forbidden(self):
        """Anonymous requests are rejected with 401."""
        self.client.logout()
        r = self.client.get(reverse('users.ajax'))
        assert r.status_code == 401
class TestEdit(UserViewBase):
    """Tests for the logged-in profile edit view (users.edit): password rules,
    email-change confirmation flow, bio, notifications, locale removal."""
    def setUp(self):
        super(TestEdit, self).setUp()
        self.client.login(username='jbalogh@mozilla.com', password='password')
        self.user = UserProfile.objects.get(username='jbalogh')
        self.url = reverse('users.edit')
        # Baseline valid form payload, including a password change.
        self.data = {'username': 'jbalogh', 'email': 'jbalogh@mozilla.com',
                     'oldpassword': 'password', 'password': 'longenough',
                     'password2': 'longenough', 'lang': 'en-US'}
    def test_password_logs(self):
        """A successful password change records a CHANGE_PASSWORD log entry."""
        res = self.client.post(self.url, self.data)
        assert res.status_code == 302
        assert self.user.userlog_set.filter(
            activity_log__action=amo.LOG.CHANGE_PASSWORD.id).count() == 1
    def test_password_empty(self):
        """Users with Users:Edit rules may submit the form with no password
        fields at all."""
        admingroup = Group(rules='Users:Edit')
        admingroup.save()
        GroupUser.objects.create(group=admingroup, user=self.user)
        homepage = {'username': 'jbalogh', 'email': 'jbalogh@mozilla.com',
                    'homepage': 'http://cbc.ca', 'lang': 'en-US'}
        res = self.client.post(self.url, homepage)
        assert res.status_code == 302
    def test_password_blacklisted(self):
        """A blacklisted password is rejected with a form error."""
        BlacklistedPassword.objects.create(password='password')
        bad = self.data.copy()
        bad['password'] = 'password'
        res = self.client.post(self.url, bad)
        assert res.status_code == 200
        assert not res.context['form'].is_valid()
        assert res.context['form'].errors['password'] == (
            [u'That password is not allowed.'])
    def test_password_short(self):
        """A password under 8 characters is rejected with a form error."""
        bad = self.data.copy()
        bad['password'] = 'short'
        res = self.client.post(self.url, bad)
        assert res.status_code == 200
        assert not res.context['form'].is_valid()
        assert res.context['form'].errors['password'] == (
            [u'Must be 8 characters or more.'])
    def test_email_change_mail_sent(self):
        """Changing the email sends a confirmation mail; the display name
        updates immediately but the email stays unchanged until confirmed."""
        data = {'username': 'jbalogh',
                'email': 'jbalogh.changed@mozilla.com',
                'display_name': 'DJ SurfNTurf',
                'lang': 'en-US'}
        r = self.client.post(self.url, data, follow=True)
        self.assert3xx(r, self.url)
        self.assertContains(r, 'An email has been sent to %s' % data['email'])
        # The email shouldn't change until they confirm, but the name should
        u = UserProfile.objects.get(id='4043307')
        assert u.name == 'DJ SurfNTurf'
        assert u.email == 'jbalogh@mozilla.com'
        assert len(mail.outbox) == 1
        assert mail.outbox[0].subject.find('Please confirm your email') == 0
        assert mail.outbox[0].body.find('%s/emailchange/' % self.user.id) > 0
    @patch.object(settings, 'SEND_REAL_EMAIL', False)
    def test_email_change_mail_send_even_with_fake_email(self):
        """The confirmation mail is sent even when SEND_REAL_EMAIL is off."""
        data = {'username': 'jbalogh',
                'email': 'jbalogh.changed@mozilla.com',
                'display_name': 'DJ SurfNTurf',
                'lang': 'en-US'}
        self.client.post(self.url, data, follow=True)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].subject.find('Please confirm your email') == 0
    def test_edit_bio(self):
        """The bio field saves on first write and overwrites on the second."""
        assert self.get_profile().bio is None
        data = {'username': 'jbalogh',
                'email': 'jbalogh.changed@mozilla.com',
                'bio': 'xxx unst unst',
                'lang': 'en-US'}
        r = self.client.post(self.url, data, follow=True)
        self.assert3xx(r, self.url)
        self.assertContains(r, data['bio'])
        assert unicode(self.get_profile().bio) == data['bio']
        data['bio'] = 'yyy unst unst'
        r = self.client.post(self.url, data, follow=True)
        self.assert3xx(r, self.url)
        self.assertContains(r, data['bio'])
        assert unicode(self.get_profile().bio) == data['bio']
    def check_default_choices(self, choices, checked=True):
        """Assert the notification checkboxes on the page match ``choices``,
        all checked (default) or all unchecked."""
        doc = pq(self.client.get(self.url).content)
        assert doc('input[name=notifications]:checkbox').length == len(choices)
        for id, label in choices:
            box = doc('input[name=notifications][value=%s]' % id)
            if checked:
                assert box.filter(':checked').length == 1
            else:
                assert box.length == 1
            parent = box.parent('label')
            if checked:
                # Check for "NEW" message.
                assert parent.find('.msg').length == 1
            assert parent.remove('.msg, .req').text() == label
    def post_notifications(self, choices):
        """Post an empty notification selection; only mandatory notifications
        should remain enabled afterwards."""
        self.check_default_choices(choices)
        self.data['notifications'] = []
        r = self.client.post(self.url, self.data)
        self.assert3xx(r, self.url, 302)
        assert UserNotification.objects.count() == len(email.NOTIFICATIONS)
        assert UserNotification.objects.filter(enabled=True).count() == (
            len(filter(lambda x: x.mandatory, email.NOTIFICATIONS)))
        self.check_default_choices(choices, checked=False)
    def test_edit_notifications(self):
        """A developer can toggle a subset; mandatory ones stay enabled."""
        # Make jbalogh a developer.
        AddonUser.objects.create(
            user=self.user,
            addon=Addon.objects.create(type=amo.ADDON_EXTENSION))
        choices = email.NOTIFICATIONS_CHOICES
        self.check_default_choices(choices)
        self.data['notifications'] = [2, 4, 6]
        r = self.client.post(self.url, self.data)
        self.assert3xx(r, self.url, 302)
        mandatory = [n.id for n in email.NOTIFICATIONS if n.mandatory]
        total = len(self.data['notifications'] + mandatory)
        assert UserNotification.objects.count() == len(email.NOTIFICATIONS)
        assert UserNotification.objects.filter(enabled=True).count() == total
        doc = pq(self.client.get(self.url).content)
        assert doc('input[name=notifications]:checked').length == total
        assert doc('.more-none').length == len(email.NOTIFICATION_GROUPS)
        assert doc('.more-all').length == len(email.NOTIFICATION_GROUPS)
    def test_edit_notifications_non_dev(self):
        self.post_notifications(email.NOTIFICATIONS_CHOICES_NOT_DEV)
    def test_edit_notifications_non_dev_error(self):
        """Non-developers cannot select developer-only notifications."""
        self.data['notifications'] = [2, 4, 6]
        r = self.client.post(self.url, self.data)
        assert r.context['form'].errors['notifications']
    def test_collections_toggles(self):
        """The misc profile section renders on the edit page."""
        r = self.client.get(self.url)
        assert r.status_code == 200
        doc = pq(r.content)
        assert doc('#profile-misc').length == 1
    def test_remove_locale_bad_request(self):
        """Posting without a locale is a 400."""
        r = self.client.post(self.user.get_user_url('remove-locale'))
        assert r.status_code == 400
    @patch.object(UserProfile, 'remove_locale')
    def test_remove_locale(self, remove_locale_mock):
        """A valid locale is passed through to UserProfile.remove_locale."""
        r = self.client.post(self.user.get_user_url('remove-locale'),
                             {'locale': 'el'})
        assert r.status_code == 200
        remove_locale_mock.assert_called_with('el')
    def test_remove_locale_default_locale(self):
        """The site default locale cannot be removed."""
        r = self.client.post(self.user.get_user_url('remove-locale'),
                             {'locale': settings.LANGUAGE_CODE})
        assert r.status_code == 400
class TestEditAdmin(UserViewBase):
    """Tests for the admin user edit view (users.admin_edit): permissions,
    anonymization, and admin activity logging."""
    fixtures = ['base/users']
    def setUp(self):
        super(TestEditAdmin, self).setUp()
        self.client.login(username='admin@mozilla.com', password='password')
        self.regular = self.get_user()
        self.url = reverse('users.admin_edit', args=[self.regular.pk])
    def get_data(self):
        """Form payload built from the target user, minus fields the admin
        form does not accept (password, fxa_id)."""
        data = model_to_dict(self.regular)
        data['admin_log'] = 'test'
        del data['password']
        del data['fxa_id']
        return data
    def get_user(self):
        # Using pk so that we can still get the user after anonymize.
        return UserProfile.objects.get(pk=10482)
    def test_edit(self):
        res = self.client.get(self.url)
        assert res.status_code == 200
    def test_edit_without_user_lang(self):
        """The page renders even if the user has no saved language."""
        self.regular.lang = None
        self.regular.save()
        res = self.client.get(self.url)
        assert res.status_code == 200
    def test_edit_forbidden(self):
        """A non-admin (editor) account gets a 403."""
        self.client.logout()
        self.client.login(username='editor@mozilla.com', password='password')
        res = self.client.get(self.url)
        assert res.status_code == 403
    def test_edit_forbidden_anon(self):
        """Anonymous users are redirected (302) instead of seeing the page."""
        self.client.logout()
        res = self.client.get(self.url)
        assert res.status_code == 302
    def test_anonymize(self):
        """Anonymizing replaces the password with the sentinel value."""
        data = self.get_data()
        data['anonymize'] = True
        res = self.client.post(self.url, data)
        assert res.status_code == 302
        assert self.get_user().password == "sha512$Anonymous$Password"
    def test_anonymize_fails(self):
        """Anonymize combined with other field edits is rejected."""
        data = self.get_data()
        data['anonymize'] = True
        data['email'] = 'something@else.com'
        res = self.client.post(self.url, data)
        assert res.status_code == 200
        assert self.get_user().password == self.regular.password
    def test_admin_logs_edit(self):
        """An edit records one ADMIN_USER_EDITED log with the admin_log text."""
        data = self.get_data()
        data['email'] = 'something@else.com'
        self.client.post(self.url, data)
        res = ActivityLog.objects.filter(action=amo.LOG.ADMIN_USER_EDITED.id)
        assert res.count() == 1
        assert self.get_data()['admin_log'] in res[0]._arguments
    def test_admin_logs_anonymize(self):
        """Anonymizing records one ADMIN_USER_ANONYMIZED log entry."""
        data = self.get_data()
        data['anonymize'] = True
        self.client.post(self.url, data)
        res = (ActivityLog.objects
               .filter(action=amo.LOG.ADMIN_USER_ANONYMIZED.id))
        assert res.count() == 1
        assert self.get_data()['admin_log'] in res[0]._arguments
    def test_admin_no_password(self):
        """Password fields never change the password here and are masked as
        '****' in the logged details."""
        data = self.get_data()
        data.update({'password': 'pass1234',
                     'password2': 'pass1234',
                     'oldpassword': 'password'})
        self.client.post(self.url, data)
        logs = ActivityLog.objects.filter
        assert logs(action=amo.LOG.CHANGE_PASSWORD.id).count() == 0
        res = logs(action=amo.LOG.ADMIN_USER_EDITED.id)
        assert res.count() == 1
        assert res[0].details['password'][0] == u'****'
    def test_delete_user_display_name_xss(self):
        # This is to test for bug 835827.
        self.regular.display_name = '"><img src=a onerror=alert(1)><a a="'
        self.regular.save()
        delete_url = reverse('admin:users_userprofile_delete',
                             args=(self.regular.pk,))
        res = self.client.post(delete_url, {'post': 'yes'}, follow=True)
        assert self.regular.display_name not in res.content
# Minimal response stand-in -- presumably used to stub HTTP responses in
# tests below or elsewhere in the module; verify usage at call sites.
FakeResponse = collections.namedtuple("FakeResponse", "status_code content")
class TestPasswordAdmin(UserViewBase):
    """Privileged (editor) accounts face a stricter password-complexity rule
    than regular users; removing the privilege relaxes it."""
    fixtures = ['base/users']
    def setUp(self):
        super(TestPasswordAdmin, self).setUp()
        self.client.login(username='editor@mozilla.com', password='password')
        self.url = reverse('users.edit')
        self.correct = {'username': 'editor',
                        'email': 'editor@mozilla.com',
                        'oldpassword': 'password', 'password': 'longenough',
                        'password2': 'longenough', 'lang': 'en-US'}
    def test_password_admin(self):
        """An editor's letters-only password is rejected."""
        res = self.client.post(self.url, self.correct, follow=False)
        assert res.status_code == 200
        assert not res.context['form'].is_valid()
        assert res.context['form'].errors['password'] == (
            [u'Letters and numbers required.'])
    def test_password(self):
        """Once the user is stripped of groups, the same password is accepted."""
        UserProfile.objects.get(username='editor').groups.all().delete()
        res = self.client.post(self.url, self.correct, follow=False)
        assert res.status_code == 302
class TestEmailChange(UserViewBase):
    """Tests for the email-change confirmation link (users.emailchange)."""
    def setUp(self):
        super(TestEmailChange, self).setUp()
        # A valid token/hash pair for changing self.user to nobody@mozilla.org.
        self.token, self.hash = EmailResetCode.create(self.user.id,
                                                      'nobody@mozilla.org')
    def test_fail(self):
        """Wrong user or tampered hash must be rejected (404/400)."""
        # Completely invalid user, valid code
        url = reverse('users.emailchange', args=[1234, self.token, self.hash])
        r = self.client.get(url, follow=True)
        assert r.status_code == 404
        # User is in the system, but not attached to this code, valid code
        url = reverse('users.emailchange', args=[9945, self.token, self.hash])
        r = self.client.get(url, follow=True)
        assert r.status_code == 400
        # Valid user, invalid code
        url = reverse('users.emailchange', args=[self.user.id, self.token,
                                                 self.hash[:-3]])
        r = self.client.get(url, follow=True)
        assert r.status_code == 400
    def test_success(self):
        """A matching user/token/hash applies the pending email change."""
        assert self.user.email == 'jbalogh@mozilla.com'
        url = reverse('users.emailchange', args=[self.user.id, self.token,
                                                 self.hash])
        r = self.client.get(url, follow=True)
        assert r.status_code == 200
        u = UserProfile.objects.get(id=self.user.id)
        assert u.email == 'nobody@mozilla.org'
    def test_email_change_to_an_existing_user_email(self):
        """Changing to an email already in use is rejected with 400."""
        token, hash_ = EmailResetCode.create(self.user.id, 'testo@example.com')
        url = reverse('users.emailchange', args=[self.user.id, token, hash_])
        r = self.client.get(url, follow=True)
        assert r.status_code == 400
class TestLogin(UserViewBase):
    """Tests for the login view: redirects, FxA waffle behavior, recaptcha
    throttling, failed-login accounting, and duplicate-email edge cases."""
    fixtures = ['users/test_backends', 'base/addon_3615']
    def setUp(self):
        super(TestLogin, self).setUp()
        self.url = reverse('users.login')
        self.data = {'username': 'jbalogh@mozilla.com', 'password': 'password'}
    def test_client_login(self):
        """
        This is just here to make sure Test Client's login() works with
        our custom code.
        """
        assert not self.client.login(username='jbalogh@mozilla.com',
                                     password='wrongpassword')
        assert self.client.login(**self.data)
    def test_double_login(self):
        """Logging in while already logged in bounces back home."""
        r = self.client.post(self.url, self.data, follow=True)
        self.assert3xx(r, reverse('home'))
        # If you go to the login page when you're already logged in we bounce
        # you.
        r = self.client.get(self.url, follow=True)
        self.assert3xx(r, reverse('home'))
    def test_ok_redirects(self):
        """A relative ?to= path is honored after login."""
        r = self.client.post(self.url, self.data, follow=True)
        self.assert3xx(r, reverse('home'))
        r = self.client.get(self.url + '?to=/de/firefox/', follow=True)
        self.assert3xx(r, '/de/firefox/')
    def test_absolute_redirect_url(self):
        # We should always be using relative paths so don't allow absolute
        # URLs even if they're on the same domain.
        r = self.client.get(reverse('home'))
        assert 'Log out' not in r.content
        r = self.client.post(self.url, self.data, follow=True)
        self.assert3xx(r, reverse('home'))
        assert 'Log out' in r.content
        r = self.client.get(
            self.url + '?to=http://testserver/en-US/firefox/users/edit')
        self.assert3xx(r, '/')
    def test_bad_redirect_other_domain(self):
        """An off-site ?to= target falls back to home."""
        r = self.client.get(reverse('home'))
        assert 'Log out' not in r.content
        r = self.client.post(
            self.url + '?to=https://example.com/this/is/bad',
            self.data, follow=True)
        self.assert3xx(r, reverse('home'))
        assert 'Log out' in r.content
    def test_bad_redirect_js(self):
        """A javascript: ?to= target falls back to home."""
        r = self.client.get(reverse('home'))
        assert 'Log out' not in r.content
        r = self.client.post(
            self.url + '?to=javascript:window.alert("xss");',
            self.data, follow=True)
        self.assert3xx(r, reverse('home'))
        assert 'Log out' in r.content
    def test_double_login_fxa_enabled(self):
        """With the fxa-auth switch on, login lands on the migrate page."""
        self.create_switch('fxa-auth', active=True)
        r = self.client.post(self.url, self.data, follow=True)
        self.assert3xx(r, migrate_path())
        # If you go to the login page when you're already logged in we bounce
        # you.
        r = self.client.get(self.url, follow=True)
        self.assert3xx(r, reverse('home'))
    def test_ok_redirects_fxa_enabled(self):
        """With fxa-auth on, a relative ?to= is carried via the migrate page."""
        self.create_switch('fxa-auth', active=True)
        r = self.client.post(
            self.url + '?to=/de/firefox/here/', self.data, follow=True)
        self.assert3xx(r, migrate_path('/de/firefox/here/'))
        r = self.client.get(
            self.url + '?to=/de/firefox/extensions/', follow=True)
        self.assert3xx(r, '/de/firefox/extensions/')
    def test_bad_redirect_other_domain_fxa_enabled(self):
        self.create_switch('fxa-auth', active=True)
        r = self.client.post(
            self.url + '?to=https://example.com/this/is/bad',
            self.data, follow=True)
        self.assert3xx(r, migrate_path())
    def test_bad_redirect_js_fxa_enabled(self):
        self.create_switch('fxa-auth', active=True)
        r = self.client.post(
            self.url + '?to=javascript:window.alert("xss");',
            self.data, follow=True)
        self.assert3xx(r, migrate_path())
    def test_login_link(self):
        r = self.client.get(self.url)
        assert r.status_code == 200
        assert pq(r.content)('#aux-nav li.login').length == 1
    def test_logout_link(self):
        self.test_client_login()
        r = self.client.get(reverse('home'))
        assert r.status_code == 200
        assert pq(r.content)('#aux-nav li.logout').length == 1
    @amo.tests.mobile_test
    def test_mobile_login(self):
        """The mobile header renders without a login nav item."""
        r = self.client.get(self.url)
        assert r.status_code == 200
        doc = pq(r.content)('header')
        assert doc('nav').length == 1
        assert doc('#home').length == 1
        assert doc('#auth-nav li.login').length == 0
    def test_login_ajax(self):
        url = reverse('users.login_modal')
        r = self.client.get(url)
        assert r.status_code == 200
        res = self.client.post(url, data=self.data)
        assert res.status_code == 302
    def test_login_ajax_error(self):
        """A missing username yields a 'required' form error."""
        url = reverse('users.login_modal')
        # NOTE(review): `data` aliases self.data (no .copy()), so the post of
        # self.data below sees the mutation -- intended here, but fragile.
        data = self.data
        data['username'] = ''
        res = self.client.post(url, data=self.data)
        assert res.context['form'].errors['username'][0] == (
            'This field is required.')
    def test_login_ajax_wrong(self):
        """A wrong username/password pair yields the generic auth error."""
        url = reverse('users.login_modal')
        # NOTE(review): same aliasing of self.data as test_login_ajax_error.
        data = self.data
        data['username'] = 'jeffb@mozilla.com'
        res = self.client.post(url, data=self.data)
        text = 'Please enter a correct username and password.'
        assert res.context['form'].errors['__all__'][0].startswith(text)
    def test_login_no_recaptcha(self):
        res = self.client.post(self.url, data=self.data)
        assert res.status_code == 302
    @patch.object(settings, 'NOBOT_RECAPTCHA_PRIVATE_KEY', 'something')
    @patch.object(settings, 'LOGIN_RATELIMIT_USER', 2)
    def test_login_attempts_recaptcha(self):
        """Past the rate limit, the form demands a recaptcha."""
        res = self.client.post(self.url, data=self.data)
        assert res.status_code == 200
        assert res.context['form'].fields.get('recaptcha')
    @patch.object(settings, 'NOBOT_RECAPTCHA_PRIVATE_KEY', 'something')
    def test_login_shown_recaptcha(self):
        """If recaptcha was shown, it must be filled in to log in."""
        data = self.data.copy()
        data['recaptcha_shown'] = ''
        res = self.client.post(self.url, data=data)
        assert res.status_code == 200
        assert res.context['form'].fields.get('recaptcha')
    @patch.object(settings, 'NOBOT_RECAPTCHA_PRIVATE_KEY', 'something')
    @patch.object(settings, 'LOGIN_RATELIMIT_USER', 2)
    @patch('olympia.amo.fields.ReCaptchaField.clean')
    def test_login_with_recaptcha(self, clean):
        """A passing recaptcha lets the throttled login through."""
        clean.return_value = ''
        data = self.data.copy()
        data.update({'recaptcha': '', 'recaptcha_shown': ''})
        res = self.client.post(self.url, data=data)
        assert res.status_code == 302
    def test_login_fails_increment(self):
        # It increments even when the form is wrong.
        user = UserProfile.objects.filter(email=self.data['username'])
        # The fixture user starts with 3 recorded failed attempts.
        assert user.get().failed_login_attempts == 3
        self.client.post(self.url, data={'username': self.data['username']})
        assert user.get().failed_login_attempts == 4
    def test_doubled_account(self):
        """
        Logging in to an account that shares a User object with another
        account works properly.
        """
        profile = UserProfile.objects.create(username='login_test',
                                             email='bob@example.com')
        profile.set_password('bazpassword')
        profile.email = 'charlie@example.com'
        profile.save()
        profile2 = UserProfile.objects.create(username='login_test2',
                                              email='bob@example.com')
        profile2.set_password('foopassword')
        profile2.save()
        res = self.client.post(self.url,
                               data={'username': 'charlie@example.com',
                                     'password': 'wrongpassword'})
        assert res.status_code == 200
        assert UserProfile.objects.get(
            email='charlie@example.com').failed_login_attempts == 1
        res2 = self.client.post(self.url,
                                data={'username': 'charlie@example.com',
                                      'password': 'bazpassword'})
        assert res2.status_code == 302
        res3 = self.client.post(self.url, data={'username': 'bob@example.com',
                                                'password': 'foopassword'})
        assert res3.status_code == 302
    def test_changed_account(self):
        """
        Logging in to an account that had its email changed succeeds.
        """
        profile = UserProfile.objects.create(username='login_test',
                                             email='bob@example.com')
        profile.set_password('bazpassword')
        profile.email = 'charlie@example.com'
        profile.save()
        res = self.client.post(self.url,
                               data={'username': 'charlie@example.com',
                                     'password': 'wrongpassword'})
        assert res.status_code == 200
        assert UserProfile.objects.get(
            email='charlie@example.com').failed_login_attempts == 1
        res2 = self.client.post(self.url,
                                data={'username': 'charlie@example.com',
                                      'password': 'bazpassword'})
        assert res2.status_code == 302
@patch.object(settings, 'NOBOT_RECAPTCHA_PRIVATE_KEY', '')
@patch('olympia.users.models.UserProfile.log_login_attempt')
class TestFailedCount(UserViewBase):
    """Verifies which requests do (and do not) record a login attempt via
    UserProfile.log_login_attempt; the mock is injected into every test."""
    fixtures = ['users/test_backends', 'base/addon_3615']
    def setUp(self):
        super(TestFailedCount, self).setUp()
        self.url = reverse('users.login')
        self.data = {'username': 'jbalogh@mozilla.com', 'password': 'password'}
    def log_calls(self, obj):
        """Return the first positional argument of each recorded mock call."""
        return [call[0][0] for call in obj.call_args_list]
    def test_login_passes(self, log_login_attempt):
        """A valid login logs a single successful attempt."""
        self.client.post(self.url, data=self.data)
        assert self.log_calls(log_login_attempt) == [True]
    def test_login_fails(self, log_login_attempt):
        """A password-less post logs a single failed attempt."""
        self.client.post(self.url, data={'username': self.data['username']})
        assert self.log_calls(log_login_attempt) == [False]
    def test_login_deleted(self, log_login_attempt):
        """A deleted account's attempt is logged as failed."""
        (UserProfile.objects.get(email=self.data['username'])
                            .update(deleted=True))
        self.client.post(self.url, data={'username': self.data['username']})
        assert self.log_calls(log_login_attempt) == [False]
    def test_login_confirmation(self, log_login_attempt):
        """An unconfirmed account's attempt is logged as failed."""
        (UserProfile.objects.get(email=self.data['username'])
                            .update(confirmationcode='123'))
        self.client.post(self.url, data={'username': self.data['username']})
        assert self.log_calls(log_login_attempt) == [False]
    def test_login_get(self, log_login_attempt):
        """GET requests never log an attempt."""
        self.client.get(self.url, data={'username': self.data['username']})
        assert not log_login_attempt.called
    def test_login_get_no_data(self, log_login_attempt):
        self.client.get(self.url)
        assert not log_login_attempt.called
class TestUnsubscribe(UserViewBase):
    """Tests for the one-click unsubscribe link (users.unsubscribe)."""
    fixtures = ['base/users']
    def setUp(self):
        super(TestUnsubscribe, self).setUp()
        self.user = UserProfile.objects.get(email='editor@mozilla.com')
    def test_correct_url_update_notification(self):
        """An existing enabled UserNotification gets flipped to disabled."""
        # Make sure the user is subscribed
        perm_setting = email.NOTIFICATIONS[0]
        un = UserNotification.objects.create(notification_id=perm_setting.id,
                                             user=self.user,
                                             enabled=True)
        # Create a URL
        token, hash = UnsubscribeCode.create(self.user.email)
        url = reverse('users.unsubscribe', args=[token, hash,
                                                 perm_setting.short])
        # Load the URL
        r = self.client.get(url)
        doc = pq(r.content)
        # Check that it was successful
        assert doc('#unsubscribe-success').length
        assert doc('#standalone').length
        assert doc('#standalone ul li').length == 1
        # Make sure the user is unsubscribed
        un = UserNotification.objects.filter(notification_id=perm_setting.id,
                                             user=self.user)
        assert un.count() == 1
        assert not un.all()[0].enabled
    def test_correct_url_new_notification(self):
        """With no prior row, a disabled UserNotification is created."""
        # Make sure the user is subscribed
        assert not UserNotification.objects.count()
        # Create a URL
        perm_setting = email.NOTIFICATIONS[0]
        token, hash = UnsubscribeCode.create(self.user.email)
        url = reverse('users.unsubscribe', args=[token, hash,
                                                 perm_setting.short])
        # Load the URL
        r = self.client.get(url)
        doc = pq(r.content)
        # Check that it was successful
        assert doc('#unsubscribe-success').length
        assert doc('#standalone').length
        assert doc('#standalone ul li').length == 1
        # Make sure the user is unsubscribed
        un = UserNotification.objects.filter(notification_id=perm_setting.id,
                                             user=self.user)
        assert un.count() == 1
        assert not un.all()[0].enabled
    def test_wrong_url(self):
        """A tampered hash shows the failure page."""
        perm_setting = email.NOTIFICATIONS[0]
        token, hash = UnsubscribeCode.create(self.user.email)
        hash = hash[::-1]  # Reverse the hash, so it's wrong
        url = reverse('users.unsubscribe', args=[token, hash,
                                                 perm_setting.short])
        r = self.client.get(url)
        doc = pq(r.content)
        assert doc('#unsubscribe-fail').length == 1
class TestReset(UserViewBase):
    """Tests for the password-reset confirmation view (users.pwreset_confirm),
    including behavior for FxA-migrated users with the waffle switch on/off."""
    fixtures = ['base/users']
    def setUp(self):
        super(TestReset, self).setUp()
        self.user = UserProfile.objects.get(email='editor@mozilla.com')
        # [uidb64, token] pair as expected by the pwreset_confirm URL.
        self.token = [urlsafe_base64_encode(str(self.user.id)),
                      default_token_generator.make_token(self.user)]
    def test_reset_msg(self):
        res = self.client.get(reverse('users.pwreset_confirm',
                                      args=self.token))
        assert 'For your account' in res.content
    def test_csrf_token_presence(self):
        """The reset form includes a CSRF token."""
        res = self.client.get(reverse('users.pwreset_confirm',
                                      args=self.token))
        assert 'csrfmiddlewaretoken' in res.content
    def test_reset_fails(self):
        """A letters-only password is rejected for this (editor) user."""
        res = self.client.post(reverse('users.pwreset_confirm',
                                       args=self.token),
                               data={'new_password1': 'spassword',
                                     'new_password2': 'spassword'})
        assert res.context['form'].errors['new_password1'][0] == (
            'Letters and numbers required.')
    def test_reset_succeeds(self):
        assert not self.user.check_password('password1')
        res = self.client.post(reverse('users.pwreset_confirm',
                                       args=self.token),
                               data={'new_password1': 'password1',
                                     'new_password2': 'password1'})
        assert self.user.reload().check_password('password1')
        assert res.status_code == 302
    def test_reset_incorrect_padding(self):
        """Fixes #929. Even if the b64 padding is incorrect, don't 500."""
        token = ["1kql8", "2xg-9f90e30ba5bda600910d"]
        res = self.client.get(reverse('users.pwreset_confirm', args=token))
        assert not res.context['validlink']
    def test_reset_msg_migrated_waffle_off(self):
        """Migrated users can still see the normal reset page, switch off."""
        self.user.update(fxa_id='123')
        res = self.client.get(reverse('users.pwreset_confirm',
                                      args=self.token))
        assert 'You can no longer change your password' not in res.content
    def test_reset_attempt_migrated_waffle_off(self):
        """Migrated users can still reset their password, switch off."""
        self.user.update(fxa_id='123')
        assert not self.user.check_password('password1')
        res = self.client.post(reverse('users.pwreset_confirm',
                                       args=self.token),
                               data={'new_password1': 'password1',
                                     'new_password2': 'password1'})
        assert self.user.reload().check_password('password1')
        assert 'You can no longer change your password' not in res.content
    def test_reset_msg_migrated_waffle_on(self):
        """With fxa-auth on, migrated users see the 'no longer' message."""
        self.create_switch('fxa-auth', active=True)
        self.user.update(fxa_id='123')
        res = self.client.get(reverse('users.pwreset_confirm',
                                      args=self.token))
        assert 'You can no longer change your password' in res.content
    def test_reset_attempt_migrated_waffle_on(self):
        """With fxa-auth on, migrated users cannot change their password."""
        self.create_switch('fxa-auth', active=True)
        self.user.update(fxa_id='123')
        assert not self.user.check_password('password1')
        res = self.client.post(reverse('users.pwreset_confirm',
                                       args=self.token),
                               data={'new_password1': 'password1',
                                     'new_password2': 'password1'})
        assert not self.user.reload().check_password('password1')
        assert 'You can no longer change your password' in res.content
class TestSessionLength(UserViewBase):

    def test_session_does_not_expire_quickly(self):
        """Make sure no one is overriding our settings and making sessions
        expire at browser session end. See:
        https://github.com/mozilla/addons-server/issues/1789
        """
        self.client.login(username='jbalogh@mozilla.com', password='password')
        response = self.client.get('/', follow=True)
        session_cookie = response.cookies[settings.SESSION_COOKIE_NAME]
        # The user's session should be valid for at least four weeks (near a
        # month).
        minimum_expiry = datetime.now() + timedelta(days=28)
        actual_expiry = parse(session_cookie['expires']).replace(tzinfo=None)
        assert session_cookie.value != ''
        assert actual_expiry >= minimum_expiry
class TestLogout(UserViewBase):
    """Tests for users.logout: session teardown and redirect validation."""
    def test_success(self):
        """After logout the account widget disappears from the page."""
        user = UserProfile.objects.get(email='jbalogh@mozilla.com')
        self.client.login(username=user.email, password='password')
        r = self.client.get('/', follow=True)
        assert pq(r.content.decode('utf-8'))('.account .user').text() == (
            user.display_name)
        assert pq(r.content)('.account .user').attr('title') == user.email
        r = self.client.get('/users/logout', follow=True)
        assert not pq(r.content)('.account .user')
    def test_redirect(self):
        """?to= redirects honor relative paths and whitelisted domains only."""
        self.client.login(username='jbalogh@mozilla.com', password='password')
        self.client.get('/', follow=True)
        url = '/en-US/about'
        r = self.client.get(urlparams(reverse('users.logout'), to=url),
                            follow=True)
        self.assert3xx(r, url, status_code=302)
        url = urlparams(reverse('users.logout'), to='/addon/new',
                        domain='builder')
        r = self.client.get(url, follow=True)
        to, code = r.redirect_chain[0]
        assert to == 'https://builder.addons.mozilla.org/addon/new'
        assert code == 302
        # Test an invalid domain
        url = urlparams(reverse('users.logout'), to='/en-US/about',
                        domain='http://evil.com')
        r = self.client.get(url, follow=True)
        self.assert3xx(r, '/en-US/about', status_code=302)
    def test_session_cookie_deleted_on_logout(self):
        """Logout clears the session cookie and expires it in the past."""
        self.client.login(username='jbalogh@mozilla.com', password='password')
        r = self.client.get(reverse('users.logout'))
        cookie = r.cookies[settings.SESSION_COOKIE_NAME]
        assert cookie.value == ''
        assert cookie['expires'] == u'Thu, 01-Jan-1970 00:00:00 GMT'
class TestRegistration(UserViewBase):
    """Tests for registration: confirmation codes, resend, default language,
    and the FxA-enabled registration page."""
    def test_new_confirm(self):
        """Confirmation succeeds only with the matching code."""
        # User doesn't have a confirmation code.
        url = reverse('users.confirm', args=[self.user.id, 'code'])
        r = self.client.get(url, follow=True)
        is_anonymous = pq(r.content)('body').attr('data-anonymous')
        assert json.loads(is_anonymous)
        self.user.update(confirmationcode='code')
        # URL has the wrong confirmation code.
        url = reverse('users.confirm', args=[self.user.id, 'blah'])
        r = self.client.get(url, follow=True)
        self.assertContains(r, 'Invalid confirmation code!')
        # URL has the right confirmation code.
        url = reverse('users.confirm', args=[self.user.id, 'code'])
        r = self.client.get(url, follow=True)
        self.assertContains(r, 'Successfully verified!')
    def test_new_confirm_resend(self):
        """Resending works once the user has a confirmation code."""
        # User doesn't have a confirmation code.
        url = reverse('users.confirm.resend', args=[self.user.id])
        r = self.client.get(url, follow=True)
        self.user.update(confirmationcode='code')
        # URL has the right confirmation code now.
        r = self.client.get(url, follow=True)
        self.assertContains(r, 'An email has been sent to your address')
    def test_default_lang(self):
        """When a user registers, set its lang to the current locale."""
        with self.activate('fr'):
            url = reverse('users.register')
            self.client.post(url, data={'email': 'new@example.com',
                                        'username': 'new',
                                        'password': 'foobarbaz',
                                        'password2': 'foobarbaz'})
            user = UserProfile.objects.get(email='new@example.com')
            assert user.lang == 'fr'
    def test_fxa_auth_enabled(self):
        """When FxA is enabled it should render the login page."""
        amo.tests.create_switch('fxa-auth', active=True)
        response = self.client.get(reverse('users.register'))
        self.assertContains(response, 'Enter your email')
class TestProfileView(UserViewBase):
    """The homepage URL shows on a profile only for developer accounts."""

    def setUp(self):
        super(TestProfileView, self).setUp()
        self.user = UserProfile.objects.create(homepage='http://example.com')
        self.url = reverse('users.profile', args=[self.user.id])

    def test_non_developer_homepage_url(self):
        """Don't display homepage url if the user is not a developer."""
        response = self.client.get(self.url)
        self.assertNotContains(response, self.user.homepage)

    @patch.object(UserProfile, 'is_developer', True)
    def test_developer_homepage_url(self):
        """Display homepage url for a developer user."""
        response = self.client.get(self.url)
        self.assertContains(response, self.user.homepage)
class TestProfileLinks(UserViewBase):
    """Which action links render on a profile for anon / self / admin."""
    fixtures = ['base/appversion', 'base/featured', 'users/test_backends']
    def test_edit_buttons(self):
        """Ensure admin/user edit buttons are shown."""
        def get_links(id):
            """Grab profile, return edit links."""
            url = reverse('users.profile', args=[id])
            r = self.client.get(url)
            return pq(r.content)('#profile-actions a')
        # Anonymous user.
        links = get_links(self.user.id)
        assert links.length == 1
        assert links.eq(0).attr('href') == reverse(
            'users.abuse', args=[self.user.id])
        # Non-admin, someone else's profile.
        self.client.login(username='jbalogh@mozilla.com', password='password')
        links = get_links(9945)
        assert links.length == 1
        assert links.eq(0).attr('href') == reverse('users.abuse', args=[9945])
        # Non-admin, own profile.
        links = get_links(self.user.id)
        assert links.length == 1
        assert links.eq(0).attr('href') == reverse('users.edit')
        # Admin, someone else's profile.
        admingroup = Group(rules='Users:Edit')
        admingroup.save()
        GroupUser.objects.create(group=admingroup, user=self.user)
        cache.clear()
        # Admin, own profile.
        links = get_links(self.user.id)
        assert links.length == 2
        assert links.eq(0).attr('href') == reverse('users.edit')
        # TODO XXX Uncomment when we have real user editing pages
        # assert links.eq(1).attr('href') + "/" == (
        #    reverse('admin:users_userprofile_change', args=[self.user.id]))
    def test_user_properties(self):
        """The request user is decorated with addon-related helper attrs."""
        self.client.login(username='jbalogh@mozilla.com', password='password')
        response = self.client.get(reverse('home'))
        request = response.context['request']
        assert hasattr(request.user, 'mobile_addons')
        assert hasattr(request.user, 'favorite_addons')
class TestProfileSections(TestCase):
    """Tests for the individual sections rendered on a user profile page:
    last-login info, add-ons, personas, reviews, collections and the
    abuse-report UI."""

    fixtures = ['base/users', 'base/addon_3615',
                'base/addon_5299_gcal', 'base/collections',
                'reviews/dev-reply']

    def setUp(self):
        super(TestProfileSections, self).setUp()
        self.user = UserProfile.objects.get(id=10482)
        self.url = reverse('users.profile', args=[self.user.id])

    def test_mine_anonymous(self):
        # /user/me/ has no meaning for anonymous visitors.
        res = self.client.get('/user/me/', follow=True)
        assert res.status_code == 404

    def test_mine_authenticated(self):
        # /user/me/ resolves to the logged-in user's own profile.
        self.login(self.user)
        res = self.client.get('/user/me/', follow=True)
        assert res.status_code == 200
        assert res.context['user'].id == self.user.id

    def test_my_last_login_anonymous(self):
        # Last-login details are hidden from anonymous visitors.
        res = self.client.get(self.url)
        assert res.status_code == 200
        doc = pq(res.content)
        assert doc('.last-login-time').length == 0
        assert doc('.last-login-ip').length == 0

    def test_my_last_login_authenticated(self):
        # Users see their own last-login time and IP on their profile.
        self.user.update(last_login_ip='255.255.255.255')
        self.login(self.user)
        res = self.client.get(self.url)
        assert res.status_code == 200
        doc = pq(res.content)
        assert doc('.last-login-time td').text()
        assert doc('.last-login-ip td').text() == '255.255.255.255'

    def test_not_my_last_login(self):
        # Other users' last-login details are never shown.
        res = self.client.get('/user/999/', follow=True)
        assert res.status_code == 200
        doc = pq(res.content)
        assert doc('.last-login-time').length == 0
        assert doc('.last-login-ip').length == 0

    def test_my_addons(self):
        # No add-ons yet: no submissions link.
        assert pq(self.client.get(self.url).content)(
            '.num-addons a').length == 0
        AddonUser.objects.create(user=self.user, addon_id=3615)
        AddonUser.objects.create(user=self.user, addon_id=5299)
        r = self.client.get(self.url)
        # Add-ons are listed ordered by weekly downloads, descending.
        a = r.context['addons'].object_list
        assert list(a) == sorted(a, key=lambda x: x.weekly_downloads,
                                 reverse=True)
        doc = pq(r.content)
        assert doc('.num-addons a[href="#my-submissions"]').length == 1
        items = doc('#my-addons .item')
        assert items.length == 2
        assert items('.install[data-addon=3615]').length == 1
        assert items('.install[data-addon=5299]').length == 1

    def test_my_unlisted_addons(self):
        """I can't see my own unlisted addons on my profile page."""
        assert pq(self.client.get(self.url).content)(
            '.num-addons a').length == 0
        AddonUser.objects.create(user=self.user, addon_id=3615)
        Addon.objects.get(pk=5299).update(is_listed=False)
        AddonUser.objects.create(user=self.user, addon_id=5299)
        r = self.client.get(self.url)
        # Only the listed add-on (3615) shows up.
        assert list(r.context['addons'].object_list) == [
            Addon.objects.get(pk=3615)]
        doc = pq(r.content)
        items = doc('#my-addons .item')
        assert items.length == 1
        assert items('.install[data-addon=3615]').length == 1

    def test_not_my_unlisted_addons(self):
        """I can't see others' unlisted addons on their profile pages."""
        res = self.client.get('/user/999/', follow=True)
        assert pq(res.content)('.num-addons a').length == 0
        user = UserProfile.objects.get(pk=999)
        AddonUser.objects.create(user=user, addon_id=3615)
        Addon.objects.get(pk=5299).update(is_listed=False)
        AddonUser.objects.create(user=user, addon_id=5299)
        r = self.client.get('/user/999/', follow=True)
        # Only the listed add-on (3615) shows up.
        assert list(r.context['addons'].object_list) == [
            Addon.objects.get(pk=3615)]
        doc = pq(r.content)
        items = doc('#my-addons .item')
        assert items.length == 1
        assert items('.install[data-addon=3615]').length == 1

    def test_my_personas(self):
        # Personas authored by the user render in the themes section.
        assert pq(self.client.get(self.url).content)(
            '.num-addons a').length == 0
        a = amo.tests.addon_factory(type=amo.ADDON_PERSONA)
        AddonUser.objects.create(user=self.user, addon=a)
        r = self.client.get(self.url)
        doc = pq(r.content)
        items = doc('#my-themes .persona')
        assert items.length == 1
        assert items('a[href="%s"]' % a.get_url_path()).length == 1

    def test_my_reviews(self):
        # Reassign an existing top-level review to this user.
        r = Review.objects.filter(reply_to=None)[0]
        r.update(user=self.user)
        cache.clear()
        self.assertSetEqual(self.user.reviews, [r])
        r = self.client.get(self.url)
        doc = pq(r.content)('#reviews')
        assert not doc.hasClass('full'), (
            'reviews should not have "full" class when there are collections')
        assert doc('.item').length == 1
        assert doc('#review-218207').length == 1
        # Edit Review form should be present.
        self.assertTemplateUsed(r, 'reviews/edit_review.html')

    def test_my_reviews_delete_link(self):
        # Assign the review to user 999 so several viewers can be tested
        # against the same profile page.
        review = Review.objects.filter(reply_to=None)[0]
        review.user_id = 999
        review.save()
        cache.clear()
        slug = Addon.objects.get(id=review.addon_id).slug
        delete_url = reverse('addons.reviews.delete', args=[slug, review.pk])

        def _get_reviews(username, password):
            """Log in as `username`, return the review's delete links."""
            self.client.login(username=username, password=password)
            r = self.client.get(reverse('users.profile', args=[999]))
            doc = pq(r.content)('#reviews')
            return doc('#review-218207 .item-actions a.delete-review')
        # Admins get the Delete Review link.
        r = _get_reviews(username='admin@mozilla.com', password='password')
        assert r.length == 1
        assert r.attr('href') == delete_url
        # Editors get the Delete Review link.
        r = _get_reviews(username='editor@mozilla.com', password='password')
        assert r.length == 1
        assert r.attr('href') == delete_url
        # Author gets the Delete Review link.
        r = _get_reviews(username='regular@mozilla.com', password='password')
        assert r.length == 1
        assert r.attr('href') == delete_url
        # Other user does not get the Delete Review link.
        r = _get_reviews(username='clouserw@gmail.com', password='password')
        assert r.length == 0

    def test_my_reviews_no_pagination(self):
        r = self.client.get(self.url)
        assert len(self.user.addons_listed) <= 10, (
            'This user should have fewer than 10 add-ons.')
        assert pq(r.content)('#my-addons .paginator').length == 0

    def test_my_reviews_pagination(self):
        # Push the user past the page size so the paginator appears.
        for i in xrange(20):
            AddonUser.objects.create(user=self.user, addon_id=3615)
        assert len(self.user.addons_listed) > 10, (
            'This user should have way more than 10 add-ons.')
        r = self.client.get(self.url)
        assert pq(r.content)('#my-addons .paginator').length == 1

    def test_my_collections_followed(self):
        # Collections the user follows appear under #my-favorite.
        coll = Collection.objects.all()[0]
        CollectionWatcher.objects.create(collection=coll, user=self.user)
        mine = Collection.objects.listed().filter(following__user=self.user)
        assert list(mine) == [coll]
        r = self.client.get(self.url)
        self.assertTemplateUsed(r, 'bandwagon/users/collection_list.html')
        assert list(r.context['fav_coll']) == [coll]
        doc = pq(r.content)
        assert doc('#reviews.full').length == 0
        ul = doc('#my-collections #my-favorite')
        assert ul.length == 1
        li = ul.find('li')
        assert li.length == 1
        a = li.find('a')
        assert a.attr('href') == coll.get_url_path()
        assert a.text() == unicode(coll.name)

    def test_my_collections_created(self):
        # Collections the user authored appear under #my-created.
        coll = Collection.objects.listed().filter(author=self.user)
        assert len(coll) == 1
        r = self.client.get(self.url)
        self.assertTemplateUsed(r, 'bandwagon/users/collection_list.html')
        self.assertSetEqual(r.context['own_coll'], coll)
        doc = pq(r.content)
        assert doc('#reviews.full').length == 0
        ul = doc('#my-collections #my-created')
        assert ul.length == 1
        li = ul.find('li')
        assert li.length == 1
        a = li.find('a')
        assert a.attr('href') == coll[0].get_url_path()
        assert a.text() == unicode(coll[0].name)

    def test_no_my_collections(self):
        # Without collections, the reviews section takes the full width.
        Collection.objects.filter(author=self.user).delete()
        r = self.client.get(self.url)
        self.assertTemplateNotUsed(r, 'bandwagon/users/collection_list.html')
        doc = pq(r.content)
        assert doc('#my-collections').length == 0
        assert doc('#reviews.full').length == 1

    def test_review_abuse_form(self):
        r = self.client.get(self.url)
        self.assertTemplateUsed(r, 'reviews/report_review.html')

    def test_user_abuse_form(self):
        # Profile shows a report-abuse button wired to a modal form.
        abuse_url = reverse('users.abuse', args=[self.user.id])
        r = self.client.get(self.url)
        doc = pq(r.content)
        button = doc('#profile-actions #report-user-abuse')
        assert button.length == 1
        assert button.attr('href') == abuse_url
        modal = doc('#popup-staging #report-user-modal.modal')
        assert modal.length == 1
        assert modal('form').attr('action') == abuse_url
        assert modal('textarea[name=text]').length == 1
        self.assertTemplateUsed(r, 'users/report_abuse.html')

    def test_no_self_abuse(self):
        # No abuse-report UI on a user's own profile.
        # NOTE(review): presumably clouserw@gmail.com is user 10482 whose
        # profile self.url points at -- verify against base/users fixtures.
        self.client.login(username='clouserw@gmail.com', password='password')
        r = self.client.get(self.url)
        doc = pq(r.content)
        assert doc('#profile-actions #report-user-abuse').length == 0
        assert doc('#popup-staging #report-user-modal.modal').length == 0
        self.assertTemplateNotUsed(r, 'users/report_abuse.html')
class TestThemesProfile(TestCase):
    """Tests for the per-user themes listing pages."""

    fixtures = ['base/user_2519']

    def setUp(self):
        super(TestThemesProfile, self).setUp()
        self.user = UserProfile.objects.get(pk=2519)
        self.url = self.user.get_user_url('themes')

    def _test_good(self, response):
        """Assert the page lists exactly ``self.theme`` in the grid."""
        assert response.status_code == 200
        listed_ids = response.context['addons'].object_list.values_list(
            'id', flat=True)
        self.assertSetEqual(listed_ids, [self.theme.id])
        page = pq(response.content)
        assert page('.no-results').length == 0
        cards = page('.personas-grid .persona.hovercard')
        assert cards.length == 1
        assert cards.find('h3').text() == unicode(self.theme.name)

    def _create_theme(self):
        """Create a persona owned (and listed) by ``self.user``."""
        self.theme = amo.tests.addon_factory(type=amo.ADDON_PERSONA)
        self.theme.addonuser_set.create(user=self.user, listed=True)

    def test_bad_user(self):
        """An unknown user slug 404s."""
        response = self.client.get(reverse('users.themes', args=['yolo']))
        assert response.status_code == 404

    def test_no_themes(self):
        """A user with no themes gets an empty-results page."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('.no-results').length == 1

    def test_themes(self):
        """A listed persona shows up on its author's themes page."""
        self._create_theme()
        response = self.client.get(self.url)
        self._test_good(response)

    def test_bad_category(self):
        """An unknown category slug 404s."""
        response = self.client.get(
            reverse('users.themes', args=['yolo', 'swag']))
        assert response.status_code == 404

    def test_empty_category(self):
        """A valid but empty category still renders."""
        self._create_theme()
        category = Category.objects.create(type=amo.ADDON_PERSONA,
                                           slug='swag')
        response = self.client.get(
            self.user.get_user_url('themes', args=[category.slug]))
        assert response.status_code == 200

    def test_themes_category(self):
        """Filtering by a category the theme is in lists the theme."""
        self._create_theme()
        category = Category.objects.create(type=amo.ADDON_PERSONA,
                                           slug='swag')
        self.theme.addoncategory_set.create(category=category)
        response = self.client.get(
            self.user.get_user_url('themes', args=[category.slug]))
        self._test_good(response)
@patch.object(settings, 'NOBOT_RECAPTCHA_PRIVATE_KEY', 'something')
class TestReportAbuse(TestCase):
    """Tests for reporting a user profile for abuse."""

    fixtures = ['base/users']

    def setUp(self):
        super(TestReportAbuse, self).setUp()
        self.full_page = reverse('users.abuse', args=[10482])

    @patch('olympia.amo.fields.ReCaptchaField.clean')
    def test_abuse_anonymous(self, clean):
        """With the captcha mocked out, an anonymous report sends mail
        and is stored without a reporter."""
        clean.return_value = ""
        self.client.post(self.full_page, {'text': 'spammy'})
        assert len(mail.outbox) == 1
        assert 'spammy' in mail.outbox[0].body
        report = AbuseReport.objects.get(user=10482)
        assert report.message == 'spammy'
        assert report.reporter is None

    def test_abuse_anonymous_fails(self):
        """Without a captcha answer, the form reports an error."""
        response = self.client.post(self.full_page, {'text': 'spammy'})
        assert 'recaptcha' in response.context['abuse_form'].errors

    def test_abuse_logged_in(self):
        """A logged-in report records the reporter's email."""
        self.client.login(username='regular@mozilla.com', password='password')
        self.client.post(self.full_page, {'text': 'spammy'})
        assert len(mail.outbox) == 1
        assert 'spammy' in mail.outbox[0].body
        report = AbuseReport.objects.get(user=10482)
        assert report.message == 'spammy'
        assert report.reporter.email == 'regular@mozilla.com'
        response = self.client.get(self.full_page)
        assert pq(response.content)('.notification-box h2').length == 1
class BaseTestMigrateView(TestCase):
    """Shared setup and login helpers for the FxA migration view tests."""

    fixtures = ['base/users']

    def setUp(self):
        super(BaseTestMigrateView, self).setUp()
        self.create_switch('fxa-auth', active=True)

    def login(self):
        """Log in as the regular fixture user; return their profile."""
        email = 'regular@mozilla.com'
        self.client.login(username=email, password='password')
        return UserProfile.objects.get(email=email)

    def login_migrated(self):
        """Log in as a user who already has an FxA id set."""
        self.login().update(fxa_id='99')
class TestMigrateViewUnauthenticated(BaseTestMigrateView):
    """Migration view behaviour for anonymous visitors."""

    def test_404_without_waffle(self):
        """The view 404s when the fxa-auth switch is off."""
        fxa_switch = Switch.objects.get(name='fxa-auth')
        fxa_switch.active = False
        fxa_switch.save()
        response = self.client.get(migrate_path())
        assert response.status_code == 404

    def test_redirects_to_root_without_next_path(self):
        """With no next path, redirect home."""
        response = self.client.get(migrate_path())
        self.assertRedirects(response, reverse('home'))

    def test_redirects_to_root_with_unsafe_next_path(self):
        """An external next path is ignored; redirect home."""
        response = self.client.get(migrate_path('https://example.com/wat'))
        self.assertRedirects(response, reverse('home'))

    def test_redirects_to_next_with_safe_next_path(self):
        """A safe relative next path is honoured."""
        response = self.client.get(migrate_path('/en-US/firefox/a/place'))
        self.assertRedirects(
            response, '/en-US/firefox/a/place', target_status_code=404)
class TestMigrateViewNotMigrated(BaseTestMigrateView):
    """Migration view behaviour for logged-in, not-yet-migrated users."""

    def setUp(self):
        super(TestMigrateViewNotMigrated, self).setUp()
        self.login()

    def _assert_prompt(self, response):
        """Assert the migration prompt page rendered successfully."""
        assert response.status_code == 200
        assert 'Migrate to Firefox Accounts' in response.content

    def _skip_link_href(self, response):
        """Return the href of the skip-migration link on the page."""
        return pq(response.content)('.skip-migrate-link a')[0].get('href')

    def test_renders_the_prompt(self):
        response = self.client.get(migrate_path())
        self._assert_prompt(response)
        assert self._skip_link_href(response) == reverse('home')

    def test_skip_link_goes_to_next_path_when_next_path_is_safe(self):
        response = self.client.get(migrate_path('/some/path'))
        self._assert_prompt(response)
        assert self._skip_link_href(response) == '/some/path'

    def test_skip_link_goes_to_root_when_next_path_is_unsafe(self):
        response = self.client.get(
            migrate_path('https://example.com/some/path'))
        self._assert_prompt(response)
        assert self._skip_link_href(response) == reverse('home')
class TestMigrateViewMigratedUser(BaseTestMigrateView):
    """Migration view behaviour for users who already migrated to FxA."""

    def setUp(self):
        super(TestMigrateViewMigratedUser, self).setUp()
        self.login_migrated()

    def test_redirects_to_root_when_migrated(self):
        response = self.client.get(migrate_path())
        self.assertRedirects(response, reverse('home'))

    def test_redirects_to_next_when_migrated_safe_next(self):
        target = '/en-US/firefox/go/here'
        response = self.client.get(migrate_path(target))
        self.assertRedirects(response, target, target_status_code=404)

    def test_redirects_to_root_when_migrated_unsafe_next(self):
        response = self.client.get(migrate_path('https://example.com/uh/oh'))
        self.assertRedirects(response, reverse('home'))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.