repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Averroes/statsmodels | statsmodels/stats/tests/test_multi.py | 27 | 18590 | '''Tests for multipletests and fdr pvalue corrections
Author : Josef Perktold
['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n', 'fdr_tsbh']
are tested against R:multtest
'hommel' is tested against R stats p_adjust (not available in multtest
'fdr_gbs', 'fdr_2sbky' I did not find them in R, currently tested for
consistency only
'''
from statsmodels.compat.python import iteritems
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_allclose)
from statsmodels.stats.multitest import (multipletests, fdrcorrection,
fdrcorrection_twostage)
from statsmodels.stats.multicomp import tukeyhsd
# Raw p-values used as input for all the R-comparison tests below.
pval0 = np.array([0.838541367553 , 0.642193923795 , 0.680845947633 ,
    0.967833824309 , 0.71626938238 , 0.177096952723 , 5.23656777208e-005 ,
    0.0202732688798 , 0.00028140506198 , 0.0149877310796])

# Reference results from R multtest mt.rawp2adjp for pval0.
# Columns (see rmethods below): rawp, Bonferroni, Holm, Hochberg,
# SidakSS, SidakSD, BH, BY.  Rows are sorted by raw p-value.
res_multtest1 = np.array([[ 5.2365677720800003e-05, 5.2365677720800005e-04,
                            5.2365677720800005e-04, 5.2365677720800005e-04,
                            5.2353339704891422e-04, 5.2353339704891422e-04,
                            5.2365677720800005e-04, 1.5337740764175588e-03],
                          [ 2.8140506198000000e-04, 2.8140506197999998e-03,
                            2.5326455578199999e-03, 2.5326455578199999e-03,
                            2.8104897961789277e-03, 2.5297966317768816e-03,
                            1.4070253098999999e-03, 4.1211324652269442e-03],
                          [ 1.4987731079600001e-02, 1.4987731079600000e-01,
                            1.1990184863680001e-01, 1.1990184863680001e-01,
                            1.4016246580579017e-01, 1.1379719679449507e-01,
                            4.9959103598666670e-02, 1.4632862843720582e-01],
                          [ 2.0273268879800001e-02, 2.0273268879799999e-01,
                            1.4191288215860001e-01, 1.4191288215860001e-01,
                            1.8520270949069695e-01, 1.3356756197485375e-01,
                            5.0683172199499998e-02, 1.4844940238274187e-01],
                          [ 1.7709695272300000e-01, 1.0000000000000000e+00,
                            1.0000000000000000e+00, 9.6783382430900000e-01,
                            8.5760763426056130e-01, 6.8947825122356643e-01,
                            3.5419390544599999e-01, 1.0000000000000000e+00],
                          [ 6.4219392379499995e-01, 1.0000000000000000e+00,
                            1.0000000000000000e+00, 9.6783382430900000e-01,
                            9.9996560644133570e-01, 9.9413539782557070e-01,
                            8.9533672797500008e-01, 1.0000000000000000e+00],
                          [ 6.8084594763299999e-01, 1.0000000000000000e+00,
                            1.0000000000000000e+00, 9.6783382430900000e-01,
                            9.9998903512635740e-01, 9.9413539782557070e-01,
                            8.9533672797500008e-01, 1.0000000000000000e+00],
                          [ 7.1626938238000004e-01, 1.0000000000000000e+00,
                            1.0000000000000000e+00, 9.6783382430900000e-01,
                            9.9999661886871472e-01, 9.9413539782557070e-01,
                            8.9533672797500008e-01, 1.0000000000000000e+00],
                          [ 8.3854136755300002e-01, 1.0000000000000000e+00,
                            1.0000000000000000e+00, 9.6783382430900000e-01,
                            9.9999998796038225e-01, 9.9413539782557070e-01,
                            9.3171263061444454e-01, 1.0000000000000000e+00],
                          [ 9.6783382430900000e-01, 1.0000000000000000e+00,
                            1.0000000000000000e+00, 9.6783382430900000e-01,
                            9.9999999999999878e-01, 9.9413539782557070e-01,
                            9.6783382430900000e-01, 1.0000000000000000e+00]])

# Column names as produced by R multtest mt.rawp2adjp.
res_multtest2_columns = ['rawp', 'Bonferroni', 'Holm', 'Hochberg', 'SidakSS', 'SidakSD',
                         'BH', 'BY', 'ABH', 'TSBH_0.05']
# Maps the R column name to (column index in the reference array,
# method keyword understood by multipletests).
rmethods = {'rawp':(0,'pval'), 'Bonferroni':(1,'b'), 'Holm':(2,'h'),
            'Hochberg':(3,'sh'), 'SidakSS':(4,'s'), 'SidakSD':(5,'hs'),
            'BH':(6,'fdr_i'), 'BY':(7,'fdr_n'),
            'TSBH_0.05':(9, 'fdr_tsbh')}

NA = np.nan  # shorthand used in the reference arrays below

# all rejections, except for Bonferroni and Sidak
res_multtest2 = np.array([
    0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.012, 0.024, 0.036, 0.048,
    0.06, 0.072, 0.012, 0.02, 0.024, 0.024, 0.024, 0.024, 0.012, 0.012,
    0.012, 0.012, 0.012, 0.012, 0.01194015976019192, 0.02376127616613988,
    0.03546430060660932, 0.04705017875634587, 0.058519850599,
    0.06987425045000606, 0.01194015976019192, 0.01984063872102404,
    0.02378486270400004, 0.023808512, 0.023808512, 0.023808512, 0.012,
    0.012, 0.012, 0.012, 0.012, 0.012, 0.0294, 0.0294, 0.0294, 0.0294,
    0.0294, 0.0294, NA, NA, NA, NA, NA, NA, 0, 0, 0, 0, 0, 0
    ]).reshape(6,10, order='F')

# Reference results for a 10 p-value case, columns as in
# res_multtest2_columns (Fortran order: one column per method).
res_multtest3 = np.array([
    0.001, 0.002, 0.003, 0.004, 0.005, 0.05, 0.06, 0.07, 0.08, 0.09, 0.01,
    0.02, 0.03, 0.04, 0.05, 0.5, 0.6, 0.7, 0.8, 0.9, 0.01, 0.018, 0.024,
    0.028, 0.03, 0.25, 0.25, 0.25, 0.25, 0.25, 0.01, 0.018, 0.024, 0.028,
    0.03, 0.09, 0.09, 0.09, 0.09, 0.09, 0.00995511979025177,
    0.01982095664805061, 0.02959822305108317, 0.03928762649718986,
    0.04888986953422814, 0.4012630607616213, 0.4613848859051006,
    0.5160176928207072, 0.5656115457763677, 0.6105838818818925,
    0.00995511979025177, 0.0178566699880266, 0.02374950634358763,
    0.02766623106147537, 0.02962749064373438, 0.2262190625000001,
    0.2262190625000001, 0.2262190625000001, 0.2262190625000001,
    0.2262190625000001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.08333333333333334,
    0.0857142857142857, 0.0875, 0.0888888888888889, 0.09,
    0.02928968253968254, 0.02928968253968254, 0.02928968253968254,
    0.02928968253968254, 0.02928968253968254, 0.2440806878306878,
    0.2510544217687075, 0.2562847222222222, 0.2603527336860670,
    0.2636071428571428, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0.005,
    0.005, 0.005, 0.005, 0.005, 0.04166666666666667, 0.04285714285714286,
    0.04375, 0.04444444444444445, 0.045
    ]).reshape(10,10, order='F')

# Reference results for a larger 30 p-value case (same column layout,
# Fortran order: one column per correction method).
res0_large = np.array([
    0.00031612, 0.0003965, 0.00048442, 0.00051932, 0.00101436, 0.00121506,
    0.0014516, 0.00265684, 0.00430043, 0.01743686, 0.02080285, 0.02785414,
    0.0327198, 0.03494679, 0.04206808, 0.08067095, 0.23882767, 0.28352304,
    0.36140401, 0.43565145, 0.44866768, 0.45368782, 0.48282088,
    0.49223781, 0.55451638, 0.6207473, 0.71847853, 0.72424145, 0.85950263,
    0.89032747, 0.0094836, 0.011895, 0.0145326, 0.0155796, 0.0304308,
    0.0364518, 0.043548, 0.0797052, 0.1290129, 0.5231058, 0.6240855,
    0.8356242, 0.981594, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 0.0094836, 0.0114985, 0.01356376, 0.01402164, 0.02637336,
    0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406, 0.416057,
    0.52922866, 0.5889564, 0.59409543, 0.67308928, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 0.0094836, 0.0114985, 0.01356376, 0.01402164,
    0.02637336, 0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406,
    0.416057, 0.52922866, 0.5889564, 0.59409543, 0.67308928, 0.89032747,
    0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,
    0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,
    0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.009440257627368331,
    0.01182686507401931, 0.01443098172617119, 0.01546285007478554,
    0.02998742566629453, 0.03581680249125385, 0.04264369065603335,
    0.0767094173291795, 0.1212818694859857, 0.410051586220387,
    0.4677640287633493, 0.5715077903157826, 0.631388450393325,
    0.656016359012282, 0.724552174001554, 0.919808283456286,
    0.999721715014484, 0.9999547032674126, 0.9999985652190126,
    0.999999964809746, 0.999999982525548, 0.999999986719131,
    0.999999997434160, 0.999999998521536, 0.999999999970829,
    0.999999999999767, 1, 1, 1, 1, 0.009440257627368331,
    0.01143489901147732, 0.0134754287611275, 0.01392738605848343,
    0.0260416568490015, 0.02993768724817902, 0.0342629726119179,
    0.0593542206208364, 0.09045742964699988, 0.308853956167216,
    0.343245865702423, 0.4153483370083637, 0.4505333180190900,
    0.453775200643535, 0.497247406680671, 0.71681858015803,
    0.978083969553718, 0.986889206426321, 0.995400461639735,
    0.9981506396214986, 0.9981506396214986, 0.9981506396214986,
    0.9981506396214986, 0.9981506396214986, 0.9981506396214986,
    0.9981506396214986, 0.9981506396214986, 0.9981506396214986,
    0.9981506396214986, 0.9981506396214986, 0.0038949, 0.0038949,
    0.0038949, 0.0038949, 0.0060753, 0.0060753, 0.006221142857142857,
    0.00996315, 0.01433476666666667, 0.05231058, 0.05673504545454545,
    0.06963535, 0.07488597857142856, 0.07488597857142856, 0.08413616,
    0.15125803125, 0.421460594117647, 0.4725384, 0.570637910526316,
    0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625,
    0.665419656, 0.7162468846153845, 0.775972982142857, 0.775972982142857,
    0.889140651724138, 0.89032747, 0.01556007537622183,
    0.01556007537622183, 0.01556007537622183, 0.01556007537622183,
    0.02427074531648065, 0.02427074531648065, 0.02485338565390302,
    0.0398026560334295, 0.0572672083580799, 0.2089800939109816,
    0.2266557764630925, 0.2781923271071372, 0.2991685206792373,
    0.2991685206792373, 0.336122876445059, 0.6042738882921044, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.00220711, 0.00220711, 0.00220711,
    0.00220711, 0.00344267, 0.00344267, 0.003525314285714285, 0.005645785,
    0.00812303444444444, 0.029642662, 0.0321498590909091,
    0.03946003166666667, 0.04243538785714285, 0.04243538785714285,
    0.0476771573333333, 0.085712884375, 0.23882767, 0.26777176,
    0.323361482631579, 0.34866844875, 0.34866844875, 0.34866844875,
    0.34866844875, 0.34866844875, 0.3770711384, 0.4058732346153846,
    0.4397180232142857, 0.4397180232142857, 0.503846369310345,
    0.504518899666667, 0.00272643, 0.00272643, 0.00272643, 0.00272643,
    0.00425271, 0.00425271, 0.0043548, 0.006974205, 0.01003433666666667,
    0.036617406, 0.03971453181818182, 0.048744745, 0.052420185,
    0.052420185, 0.058895312, 0.105880621875, 0.295022415882353,
    0.33077688, 0.399446537368421, 0.43070808375, 0.43070808375,
    0.43070808375, 0.43070808375, 0.43070808375, 0.4657937592,
    0.5013728192307692, 0.5431810875, 0.5431810875, 0.622398456206897,
    0.623229229
    ]).reshape(30,10, order='F')
class CheckMultiTestsMixin(object):
    """Mixin comparing multipletests output against R multtest results.

    Subclasses must define ``res2`` (reference matrix from R, first column
    raw p-values), ``methods`` (method keywords to check) and ``alpha``.
    """

    def test_multi_pvalcorrection(self):
        # test against R package multtest mt.rawp2adjp
        res_multtest = self.res2
        pvals_raw = res_multtest[:, 0]

        for rname, (col, method) in iteritems(rmethods):
            if method not in self.methods:
                continue
            reject, pvalscorr = multipletests(pvals_raw, alpha=self.alpha,
                                              method=method)[:2]
            assert_almost_equal(pvalscorr, res_multtest[:, col], 15)
            assert_equal(reject, pvalscorr <= self.alpha)

        # fdrcorrection output is sorted before comparing with the sorted
        # reference columns (BY in column 7, BH in column 6)
        pvalscorr = np.sort(fdrcorrection(pvals_raw, method='n')[1])
        assert_almost_equal(pvalscorr, res_multtest[:, 7], 15)
        pvalscorr = np.sort(fdrcorrection(pvals_raw, method='i')[1])
        assert_almost_equal(pvalscorr, res_multtest[:, 6], 15)
class TestMultiTests1(CheckMultiTestsMixin):
    """Standard correction methods against R multtest with alpha=0.1."""

    def __init__(self):
        self.alpha = 0.1
        self.res2 = res_multtest1
        self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']
class TestMultiTests2(CheckMultiTestsMixin):
    """Case where every hypothesis is rejected (except for 'b' and 's')."""

    def __init__(self):
        self.alpha = 0.05
        self.res2 = res_multtest2
        self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']
class TestMultiTests3(CheckMultiTestsMixin):
    """Larger 30 p-value case, including the two-stage BH method."""

    def __init__(self):
        self.alpha = 0.05
        self.res2 = res0_large
        self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',
                        'fdr_tsbh']
class TestMultiTests4(CheckMultiTestsMixin):
    """Two-stage FDR case.

    In simulations, all two stage fdr methods (fdr_tsbky, fdr_tsbh,
    fdr_gbs) have in some cases (cases with large Alternative) an FDR
    that looks too large.  This is the first such case:
    #rejected = 12, DGP has 10 false hypotheses.
    """

    def __init__(self):
        self.alpha = 0.05
        self.res2 = res_multtest3
        self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',
                        'fdr_tsbh']
def test_pvalcorrection_reject():
    """Consistency check: reject flags must equal (pvalscorr <= alpha)."""
    methods = ['b', 's', 'sh', 'hs', 'h', 'hommel', 'fdr_i', 'fdr_n',
               'fdr_tsbky', 'fdr_tsbh', 'fdr_gbs']
    for alpha in [0.01, 0.05, 0.1]:
        for method in methods:
            for ii in range(11):
                # ii small p-values, the remaining ones clearly larger;
                # 0.05001 instead of 0.05 avoids the edge case of issue #768
                pval1 = np.hstack((np.linspace(0.0001, 0.0100, ii),
                                   np.linspace(0.05001, 0.11, 10 - ii)))
                reject, pvalscorr = multipletests(pval1, alpha=alpha,
                                                  method=method)[:2]
                msg = 'case %s %3.2f rejected:%d\npval_raw=%r\npvalscorr=%r' % (
                    method, alpha, reject.sum(), pval1, pvalscorr)
                assert_equal(reject, pvalscorr <= alpha, err_msg=msg)
def test_hommel():
    # tested against R stats p_adjust(pval0, method='hommel')
    # (hommel is not available in R multtest, hence the separate reference)
    pval0 = np.array(
        [ 0.00116, 0.00924, 0.01075, 0.01437, 0.01784, 0.01918,
          0.02751, 0.02871, 0.03054, 0.03246, 0.04259, 0.06879,
          0.0691 , 0.08081, 0.08593, 0.08993, 0.09386, 0.09412,
          0.09718, 0.09758, 0.09781, 0.09788, 0.13282, 0.20191,
          0.21757, 0.24031, 0.26061, 0.26762, 0.29474, 0.32901,
          0.41386, 0.51479, 0.52461, 0.53389, 0.56276, 0.62967,
          0.72178, 0.73403, 0.87182, 0.95384])
    # Hommel-adjusted p-values as computed by R p_adjust
    result_ho = np.array(
        [ 0.0464 , 0.25872 , 0.29025 ,
          0.3495714285714286, 0.41032 , 0.44114 ,
          0.57771 , 0.60291 , 0.618954 ,
          0.6492 , 0.7402725000000001, 0.86749 ,
          0.86749 , 0.8889100000000001, 0.8971477777777778,
          0.8993 , 0.9175374999999999, 0.9175374999999999,
          0.9175374999999999, 0.9175374999999999, 0.9175374999999999,
          0.9175374999999999, 0.95384 , 0.9538400000000001,
          0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
          0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
          0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
          0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
          0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
          0.9538400000000001])
    rej, pvalscorr, _, _ = multipletests(pval0, alpha=0.1, method='ho')
    assert_almost_equal(pvalscorr, result_ho, 15)
    assert_equal(rej, result_ho < 0.1)  # booleans
def test_fdr_bky():
    """fdrcorrection_twostage on the example from the BKY paper."""
    pvals = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459,
             0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000 ]
    # The corrected p-values themselves are not checked here, only the
    # rejection count.  Same number of rejections as in the BKY paper:
    # single step-up: 4, two-stage: 8, iterated two-step: 9.
    # alpha_star also matches theirs for TST.
    res_tst = fdrcorrection_twostage(pvals, alpha=0.05, iter=False)
    # alpha_star for stage 2
    assert_almost_equal([0.047619, 0.0649], res_tst[-1][:2], 3)
    assert_equal(8, res_tst[0].sum())
def test_issorted():
    """Check that the is_sorted keyword works correctly.

    The fdrcorrection functions are tested indirectly through
    multipletests: sorted and unsorted inputs must agree once the
    sort is undone.
    """
    from statsmodels.stats.multitest import multitest_methods_names

    # data generated as random numbers np.random.beta(0.2, 0.5, size=10)
    pvals = np.array([31, 9958111, 7430818, 8653643, 9892855, 876, 2651691,
                      145836, 9931, 6174747]) * 1e-7
    sortind = np.argsort(pvals)
    sortrevind = sortind.argsort()
    pvals_sorted = pvals[sortind]

    for method in multitest_methods_names:
        res1 = multipletests(pvals, method=method, is_sorted=False)
        res2 = multipletests(pvals_sorted, method=method, is_sorted=True)
        # reject flags must match exactly after undoing the sort
        assert_equal(res2[0][sortrevind], res1[0])
        # BUG FIX: compare the corrected p-values (element 1), not the
        # reject flags a second time as the original did
        assert_allclose(res2[1][sortrevind], res1[1], rtol=1e-10)
def test_tukeyhsd():
    # example multicomp in R p 83
    # R reference printout; immediately overwritten by the parsed array
    # below, kept only as documentation of where the numbers come from
    res = '''\
pair diff lwr upr p adj
P-M 8.150000 -10.037586 26.3375861 0.670063958
S-M -3.258333 -21.445919 14.9292527 0.982419709
T-M 23.808333 5.620747 41.9959194 0.006783701
V-M 4.791667 -13.395919 22.9792527 0.931020848
S-P -11.408333 -29.595919 6.7792527 0.360680099
T-P 15.658333 -2.529253 33.8459194 0.113221634
V-P -3.358333 -21.545919 14.8292527 0.980350080
T-S 27.066667 8.879081 45.2542527 0.002027122
V-S 8.050000 -10.137586 26.2375861 0.679824487
V-T -19.016667 -37.204253 -0.8290806 0.037710044
'''
    # columns: mean difference, CI lower, CI upper, adjusted p-value
    res = np.array([[ 8.150000, -10.037586, 26.3375861, 0.670063958],
                    [-3.258333, -21.445919, 14.9292527, 0.982419709],
                    [23.808333, 5.620747, 41.9959194, 0.006783701],
                    [ 4.791667, -13.395919, 22.9792527, 0.931020848],
                    [-11.408333, -29.595919, 6.7792527, 0.360680099],
                    [15.658333, -2.529253, 33.8459194, 0.113221634],
                    [-3.358333, -21.545919, 14.8292527, 0.980350080],
                    [27.066667, 8.879081, 45.2542527, 0.002027122],
                    [ 8.050000, -10.137586, 26.2375861, 0.679824487],
                    [-19.016667, -37.204253, -0.8290806, 0.037710044]])
    # group means from the R example
    m_r = [94.39167, 102.54167, 91.13333, 118.20000, 99.18333]
    myres = tukeyhsd(m_r, 6, 110.8, alpha=0.05, df=4)
    pairs, reject, meandiffs, std_pairs, confint, q_crit = myres[:6]
    assert_almost_equal(meandiffs, res[:, 0], decimal=5)
    # confidence intervals only match the R output to 2 decimals
    assert_almost_equal(confint, res[:, 1:3], decimal=2)
    assert_equal(reject, res[:, 3] < 0.05)
| bsd-3-clause |
QuintilianoB/Violent-Python-examples | Chapter 3/1.printProfile.py | 1 | 3542 | # SQLite connection with python based on chapter 3.
# Python 3.4
"""
A script for query on Skype Database.
The default location of Skype's main.db on linux is:
/home/$LINUX_USER_NAME/.Skype/$SKYPE_USER_NAME
"""
import sqlite3
import argparse
import os
def printProfile(skypeDB):
    """Print every account profile stored in the Skype main.db at *skypeDB*."""
    connection = sqlite3.connect(skypeDB)
    cursor = connection.cursor()
    cursor.execute("SELECT fullname, skypename, city, country, datetime(profile_timestamp, 'unixepoch') FROM Accounts;")
    for account in cursor:
        print("[*] --- Found Account ---")
        print("[+]User: {0}".format(str(account[0])))
        print("[+]Skype Username: {0}".format(str(account[1])))
        print("[+]Location: {0} - {1}".format(str(account[2]), str(account[3])))
        print("[+]Profile Date: {0}".format(str(account[4])))
    cursor.close()
def printContacts(skypeDB):
    """Print all contacts stored in the Skype main.db at *skypeDB*.

    Location, mobile number and birthday are only printed when present
    in the database row.
    """
    connection = sqlite3.connect(skypeDB)
    cursor = connection.cursor()
    cursor.execute("SELECT displayname, skypename, city, country, phone_mobile, birthday FROM Contacts;")
    for row in cursor:
        print("[*] --- Found Contact ---")
        print("[+]User: {0}".format(str(row[0])))
        print("[+]Skype Username: {0}".format(str(row[1])))
        # BUG FIX: test the raw column values for NULL -- str(x) is never
        # None, so the original "str(row[n]) is not None" checks were
        # always true.
        if row[2] is not None and str(row[2]) != '':
            print("[+]Location: {0} - {1}".format(str(row[2]), str(row[3])))
        if row[4] is not None:
            print("[+]Mobile Number: {0}".format(str(row[4])))
        if row[5] is not None:
            # BUG FIX: print the birthday column (row[5]); the original
            # printed the mobile number (row[4]) again.
            print("[+]Birthday: {0}".format(str(row[5])))
    cursor.close()
def printCallLog(skypeDB):
    """Print the call history (start time and conversation partner)."""
    connection = sqlite3.connect(skypeDB)
    cursor = connection.cursor()
    cursor.execute("SELECT datetime(begin_timestamp, 'unixepoch'), identity FROM calls, conversations WHERE calls.conv_dbid = conversations.id;")
    print("[*] --- Found Calls ---")
    for begin_time, partner in cursor:
        print("[+]Time: {0} | Partner: {1}".format(str(begin_time), str(partner)))
def printMessages(skypeDB):
    """Print chat messages from the Skype main.db at *skypeDB*.

    Multi-party chat rows (body containing 'parlist') are skipped; the
    direction (To/From) is derived by comparing dialog partner and author.
    """
    connection = sqlite3.connect(skypeDB)
    cursor = connection.cursor()
    cursor.execute("SELECT datetime(timestamp, 'unixepoch'), dialog_partner, author, body_xml FROM Messages;")
    # BUG FIX: the original header said "--- Found Contact ---",
    # copy-pasted from printContacts.
    print("[*] --- Found Messages ---")
    for row in cursor:
        try:
            if 'parlist' not in str(row[3]):
                if str(row[1]) != str(row[2]):
                    msgDirection = "To {0}:".format(str(row[1]))
                else:
                    msgDirection = "From {0}:".format(str(row[2]))
                print("Time: {0} - {1} {2}".format(str(row[0]), msgDirection, str(row[3])))
        except Exception:
            # BUG FIX: the original bare "except:" also swallowed
            # SystemExit/KeyboardInterrupt; skip only unexpected per-row
            # errors (e.g. console encoding failures) and keep going.
            pass
def main():
    """Parse the command line and dump profile, contacts, calls and messages."""
    # Defines the options and the help menu.
    parser = argparse.ArgumentParser(description="A Python SQLite parser for skype DB.")
    parser.add_argument('DBfile', help="Skypes profile location.")
    # Receives the argument sent by the user.
    args = parser.parse_args()
    dbfile = args.DBfile

    # NOTE: argparse exits on a missing positional argument, so dbfile can
    # never be None here; the original "if dbfile is None" branch was
    # unreachable and has been removed.
    # Verifies if the given path is a valid directory.
    if not os.path.isdir(dbfile):
        print("[!] File does not exist: {0}".format(dbfile))
    else:
        # Join path + main.db and call each report function defined above.
        dbfile = os.path.join(dbfile, 'main.db')
        printProfile(dbfile)
        printContacts(dbfile)
        printCallLog(dbfile)
        printMessages(dbfile)


if __name__ == '__main__':
    main()
| gpl-2.0 |
calvinfarias/IC2015-2 | BOOST/boost_1_61_0/tools/build/src/engine/bump_version.py | 71 | 2821 | #!/usr/bin/python
# This script is used to bump the bjam version. It takes a single argument, e.g
#
# ./bump_version.py 3.1.9
#
# and updates all the necessary files.
#
# Copyright 2006 Rene Rivera.
# Copyright 2005-2006 Vladimir Prus.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import os
import os.path
import re
import string
import sys
# Directory containing this script and the sibling documentation directory.
srcdir = os.path.abspath(os.path.dirname(__file__))
docdir = os.path.abspath(os.path.join(srcdir, "..", "doc"))
def edit(file, *replacements):
    """Apply regex *replacements* to *file* in place.

    Each replacement is a ``(source, target)`` pair; ``source`` is compiled
    with ``re.MULTILINE``.  Every pattern must match at least once (asserted)
    so a silently non-updated file cannot slip through a version bump.
    """
    print(" '%s'..." % file)
    # Use context managers so the file handles are closed even when a
    # pattern fails to match (the original left them open on error).
    with open(file, 'r') as f:
        text = f.read()
    for (source, target) in replacements:
        text, n = re.compile(source, re.MULTILINE).subn(target, text)
        assert n > 0
    with open(file, 'w') as f:
        f.write(text)
def make_edits(ver):
    """Rewrite the version number in every version-bearing source/doc file.

    ver is a list of up to three ints; it is padded with zeros to
    (major, minor, patch) before being substituted into each file.
    """
    # pad to exactly three components, e.g. [3, 1] -> [3, 1, 0]
    ver03 = (list(ver) + [0] * 3)[0:3]
    ver02 = ver03[0:2]
    # helpers to render the version with an arbitrary separator
    join = lambda v, s : s.join(str(x) for x in v)
    dotJoin = lambda v : join(v, ".")
    print("Setting version to %s" % str(ver03))
    edit(os.path.join(srcdir, "boost-jam.spec"),
        ('^(Version:) .*$', '\\1 %s' % dotJoin(ver03)))
    edit(os.path.join(srcdir, "build.jam"),
        ('^(_VERSION_ =).* ;$', '\\1 %s ;' % join(ver03, " ")))
    edit(os.path.join(docdir, "bjam.qbk"),
        ('(\[version).*(\])', '\\1: %s\\2' % dotJoin(ver03)),
        ('(\[def :version:).*(\])', '\\1 %s\\2' % dotJoin(ver03)))
    # patchlevel.h carries the version in several redundant forms
    edit(os.path.join(srcdir, "patchlevel.h"),
        ('^(#define VERSION_MAJOR) .*$', '\\1 %s' % ver03[0]),
        ('^(#define VERSION_MINOR) .*$', '\\1 %s' % ver03[1]),
        ('^(#define VERSION_PATCH) .*$', '\\1 %s' % ver03[2]),
        ('^(#define VERSION_MAJOR_SYM) .*$', '\\1 "%02d"' % ver03[0]),
        ('^(#define VERSION_MINOR_SYM) .*$', '\\1 "%02d"' % ver03[1]),
        ('^(#define VERSION_PATCH_SYM) .*$', '\\1 "%02d"' % ver03[2]),
        ('^(#define VERSION) .*$', '\\1 "%s"' % dotJoin(ver)),
        ('^(#define JAMVERSYM) .*$', '\\1 "JAMVERSION=%s"' % dotJoin(ver02)))
def main():
    """Validate the version argument and rewrite all version-bearing files."""
    if len(sys.argv) < 2:
        print("Expect new version as argument.")
        sys.exit(1)
    if len(sys.argv) > 3:
        print("Too many arguments.")
        sys.exit(1)
    version = sys.argv[1].split(".")
    if len(version) > 3:
        print("Expect version argument in the format: <MAJOR>.<MINOR>.<PATCH>")
        sys.exit(1)
    try:
        version = list(int(x) for x in version)
    except ValueError:
        print("Version values must be valid integers.")
        sys.exit(1)
    # drop trailing zero components: 3.1.0 -> [3, 1]
    while version and version[-1] == 0:
        version.pop()
    if not version:
        print("At least one of the version values must be positive.")
        # BUG FIX: exit with a non-zero status like every other error path
        # above (bare sys.exit() exits with status 0, signalling success).
        sys.exit(1)
    make_edits(version)


if __name__ == '__main__':
    main()
| mit |
mitchcapper/mythbox | resources/lib/IMDbPY/imdb/articles.py | 17 | 5840 | """
articles module (imdb package).
This module provides functions and data to handle in a smart way
articles (in various languages) at the beginning of movie titles.
Copyright 2009 Davide Alberani <da@erlug.linux.it>
2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the cound of times they are used in a way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in utf-8 encoding:
# Generic list of articles in utf-8 encoding, used when the title's
# language is unknown (see the discussion in the comment block above).
GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el',
            "l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los',
            'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os',
            'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-',
            'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines',
            '\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9')

# Lists of articles separated by language. If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# it at imdbpy-devel@lists.sourceforge.net; non-ascii articles must be utf-8
# encoded.
LANG_ARTICLES = {
    'English': ('the', 'a', 'an'),
    'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'",
                'uno'),
    'Spanish': ('la', 'le', 'el', 'les', 'un', 'los', 'una', 'uno', 'unos',
                'unas'),
    'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
    'Turkish': (), # Some languages doesn't have articles.
}
# bound-method shortcut used by the lookup functions below
LANG_ARTICLESget = LANG_ARTICLES.get

# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it at
# imdbpy-devel@lists.sourceforge.net .
_LANG_COUNTRIES = {
    'English': ('USA', 'UK', 'Canada', 'Ireland', 'Australia'),
    'Italian': ('Italy',),
    'Spanish': ('Spain', 'Mexico'),
    'Portuguese': ('Portugal', 'Brazil'),
    'Turkish': ('Turkey',),
    #'German': ('Germany', 'East Germany', 'West Germany'),
    #'French': ('France'),
}

# Maps countries to their main language (inverse of _LANG_COUNTRIES).
COUNTRY_LANG = {}
for lang in _LANG_COUNTRIES:
    for country in _LANG_COUNTRIES[lang]:
        COUNTRY_LANG[country] = lang
def toUnicode(articles):
    """Decode a sequence of utf-8 encoded articles into a tuple of
    unicode strings."""
    return tuple(article.decode('utf_8') for article in articles)
def toDicts(articles):
    """Build two identity dictionaries (one with utf-8 encoded keys, one
    with unicode keys) from *articles*, for faster membership matching."""
    uArticles = toUnicode(articles)
    encodedDict = dict((article, article) for article in articles)
    unicodeDict = dict((article, article) for article in uArticles)
    return encodedDict, unicodeDict
def addTrailingSpace(articles):
    """Return the given utf-8 encoded articles with a trailing space added
    (unless the article already ends with an apostrophe or a dash), as two
    lists: one utf-8 encoded and one unicode."""
    encoded = []
    decoded = []
    for article in articles:
        if article[-1] not in ("'", '-'):
            article += ' '
        encoded.append(article)
        decoded.append(article.decode('utf_8'))
    return encoded, decoded
# Caches.
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
    """Return dictionaries of articles specific for *lang*, falling back to
    the generic article list when the language is unknown.  Results are
    memoized in _ART_CACHE."""
    cached = _ART_CACHE.get(lang)
    if cached is not None:
        return cached
    artDicts = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
    _ART_CACHE[lang] = artDicts
    return artDicts
def spArticlesForLang(lang):
    """Return lists of articles (with optional trailing spaces) specific for
    *lang*, falling back to the generic list when the language is unknown.
    Results are memoized in _SP_ART_CACHE."""
    cached = _SP_ART_CACHE.get(lang)
    if cached is not None:
        return cached
    spArticles = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
    _SP_ART_CACHE[lang] = spArticles
    return spArticles
| gpl-2.0 |
jcftang/ansible-modules-extras | cloud/amazon/sns_topic.py | 33 | 13805 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
description:
- The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
version_added: 2.0
author:
- "Joel Thompson (@joelthompson)"
- "Fernando Jose Pando (@nand0p)"
options:
name:
description:
- The name or ARN of the SNS topic to converge
required: True
state:
description:
- Whether to create or destroy an SNS topic
required: False
default: present
choices: ["absent", "present"]
display_name:
description:
- Display name of the topic
required: False
default: None
policy:
description:
- Policy to apply to the SNS topic
required: False
default: None
delivery_policy:
description:
- Delivery policy to apply to the SNS topic
required: False
default: None
subscriptions:
description:
- List of subscriptions to apply to the topic. Note that AWS requires
subscriptions to be confirmed, so you will need to confirm any new
subscriptions.
required: False
default: []
purge_subscriptions:
description:
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon."
required: False
default: True
extends_documentation_fragment: aws
requirements: [ "boto" ]
"""
EXAMPLES = """
- name: Create alarm SNS topic
sns_topic:
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "my_email_address@example.com"
protocol: "email"
- endpoint: "my_mobile_number"
protocol: "sms"
"""
# Return-value documentation (YAML) shown by ansible-doc.
RETURN = '''
sns_arn:
    description: The ARN of the topic you are modifying
    type: string
    sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"

sns_topic:
    description: Dict of sns topic details
    type: dict
    sample:
      name: sns-topic-name
      state: present
      display_name: default
      policy: {}
      delivery_policy: {}
      subscriptions_new: []
      subscriptions_existing: []
      subscriptions_deleted: []
      subscriptions_added: []
      subscriptions_purge: false
      check_mode: false
      topic_created: false
      topic_deleted: false
      attributes_set: []
'''
# BUG FIX above: the original sample key read "subscriptions_purge':" --
# the stray apostrophe corrupted the YAML sample.
import sys
import time
import json
import re

# boto is an optional dependency: record its availability so the module
# can fail with a clean message instead of an ImportError at load time.
try:
    import boto.sns
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
class SnsTopicManager(object):
    """ Handles SNS Topic creation and destruction """

    def __init__(self,
                 module,
                 name,
                 state,
                 display_name,
                 policy,
                 delivery_policy,
                 subscriptions,
                 purge_subscriptions,
                 check_mode,
                 region,
                 **aws_connect_params):
        self.region = region
        self.aws_connect_params = aws_connect_params
        # Bug fix: bind the module *before* opening the connection, because
        # _get_boto_connection reports failures through self.module.fail_json.
        self.module = module
        self.connection = self._get_boto_connection()
        self.changed = False
        self.name = name
        self.state = state
        self.display_name = display_name
        self.policy = policy
        self.delivery_policy = delivery_policy
        self.subscriptions = subscriptions
        self.subscriptions_existing = []
        self.subscriptions_deleted = []
        self.subscriptions_added = []
        self.purge_subscriptions = purge_subscriptions
        self.check_mode = check_mode
        self.topic_created = False
        self.topic_deleted = False
        self.arn_topic = None
        self.attributes_set = []

    def _get_boto_connection(self):
        """Open a boto SNS connection for self.region; fail the module on error."""
        try:
            return connect_to_aws(boto.sns, self.region,
                                  **self.aws_connect_params)
        except BotoServerError as err:  # 'as' form is valid on Python 2.6+ and 3
            self.module.fail_json(msg=err.message)

    def _get_all_topics(self):
        """Return the ARNs of all topics in the account/region, following pagination."""
        next_token = None
        topics = []
        while True:
            try:
                response = self.connection.get_all_topics(next_token)
            except BotoServerError as err:
                # Bug fix: 'module' was an undefined global here; use self.module.
                self.module.fail_json(msg=err.message)
            topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics'])
            next_token = response['ListTopicsResponse']['ListTopicsResult']['NextToken']
            if not next_token:
                break
        return [t['TopicArn'] for t in topics]

    def _arn_topic_lookup(self):
        """Return the ARN of the topic named self.name, or None if it does not exist."""
        # topic names cannot have colons, so this captures the full topic name
        all_topics = self._get_all_topics()
        lookup_topic = ':%s' % self.name
        for topic in all_topics:
            if topic.endswith(lookup_topic):
                return topic

    def _create_topic(self):
        """Create the topic and (outside check mode) wait for its ARN to appear."""
        self.changed = True
        self.topic_created = True
        if not self.check_mode:
            self.connection.create_topic(self.name)
            self.arn_topic = self._arn_topic_lookup()
            # Topic creation is eventually consistent; poll until visible.
            while not self.arn_topic:
                time.sleep(3)
                self.arn_topic = self._arn_topic_lookup()

    def _set_topic_attrs(self):
        """Bring DisplayName/Policy/DeliveryPolicy in line with the requested values."""
        topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
            ['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
            ['Attributes']

        if self.display_name and self.display_name != topic_attributes['DisplayName']:
            self.changed = True
            self.attributes_set.append('display_name')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
                                                     self.display_name)

        if self.policy and self.policy != json.loads(topic_attributes['Policy']):
            self.changed = True
            self.attributes_set.append('policy')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'Policy',
                                                     json.dumps(self.policy))

        # DeliveryPolicy may be missing entirely from a fresh topic.
        if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
                self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
            self.changed = True
            self.attributes_set.append('delivery_policy')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
                                                     json.dumps(self.delivery_policy))

    def _canonicalize_endpoint(self, protocol, endpoint):
        """Normalize an endpoint for comparison; SMS numbers keep digits only."""
        if protocol == 'sms':
            return re.sub('[^0-9]*', '', endpoint)
        return endpoint

    def _get_topic_subs(self):
        """Populate self.subscriptions_existing from the API, following pagination."""
        next_token = None
        while True:
            response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
            self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
                ['ListSubscriptionsByTopicResult']['Subscriptions'])
            next_token = response['ListSubscriptionsByTopicResponse'] \
                ['ListSubscriptionsByTopicResult']['NextToken']
            if not next_token:
                break

    def _set_topic_subs(self):
        """Subscribe missing endpoints and (if purging) drop undesired ones."""
        subscriptions_existing_list = []
        desired_subscriptions = [(sub['protocol'],
                                  self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
                                 self.subscriptions]

        if self.subscriptions_existing:
            for sub in self.subscriptions_existing:
                sub_key = (sub['Protocol'], sub['Endpoint'])
                subscriptions_existing_list.append(sub_key)
                if self.purge_subscriptions and sub_key not in desired_subscriptions and \
                        sub['SubscriptionArn'] != 'PendingConfirmation':
                    self.changed = True
                    self.subscriptions_deleted.append(sub_key)
                    if not self.check_mode:
                        self.connection.unsubscribe(sub['SubscriptionArn'])

        for (protocol, endpoint) in desired_subscriptions:
            if (protocol, endpoint) not in subscriptions_existing_list:
                self.changed = True
                # Bug fix: record the subscription actually being added, not the
                # leaked loop variable 'sub' from the purge loop above.
                self.subscriptions_added.append((protocol, endpoint))
                if not self.check_mode:
                    self.connection.subscribe(self.arn_topic, protocol, endpoint)

    def _delete_subscriptions(self):
        # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
        #       https://forums.aws.amazon.com/thread.jspa?threadID=85993
        for sub in self.subscriptions_existing:
            if sub['SubscriptionArn'] != 'PendingConfirmation':
                self.subscriptions_deleted.append(sub['SubscriptionArn'])
                self.changed = True
                if not self.check_mode:
                    self.connection.unsubscribe(sub['SubscriptionArn'])

    def _delete_topic(self):
        """Delete the topic itself (subscriptions should be removed first)."""
        self.topic_deleted = True
        self.changed = True
        if not self.check_mode:
            self.connection.delete_topic(self.arn_topic)

    def ensure_ok(self):
        """Converge towards state=present: create/update topic and subscriptions."""
        self.arn_topic = self._arn_topic_lookup()
        if not self.arn_topic:
            self._create_topic()
        self._set_topic_attrs()
        self._get_topic_subs()
        self._set_topic_subs()

    def ensure_gone(self):
        """Converge towards state=absent: drop subscriptions, then the topic."""
        self.arn_topic = self._arn_topic_lookup()
        if self.arn_topic:
            self._get_topic_subs()
            if self.subscriptions_existing:
                self._delete_subscriptions()
            self._delete_topic()

    def get_info(self):
        """Return a dict summarizing the requested state and the actions taken."""
        info = {
            'name': self.name,
            'state': self.state,
            'display_name': self.display_name,
            'policy': self.policy,
            'delivery_policy': self.delivery_policy,
            'subscriptions_new': self.subscriptions,
            'subscriptions_existing': self.subscriptions_existing,
            'subscriptions_deleted': self.subscriptions_deleted,
            'subscriptions_added': self.subscriptions_added,
            'subscriptions_purge': self.purge_subscriptions,
            'check_mode': self.check_mode,
            'topic_created': self.topic_created,
            'topic_deleted': self.topic_deleted,
            'attributes_set': self.attributes_set
        }
        return info
def main():
    """Ansible entry point: build the argument spec and converge the SNS topic."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present',
                                                               'absent']),
            display_name=dict(type='str', required=False),
            policy=dict(type='dict', required=False),
            delivery_policy=dict(type='dict', required=False),
            subscriptions=dict(default=[], type='list', required=False),
            purge_subscriptions=dict(type='bool', default=True),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # Pull the requested topic configuration out of the validated parameters.
    (name, state, display_name, policy, delivery_policy, subscriptions,
     purge_subscriptions) = (module.params.get(key) for key in (
        'name', 'state', 'display_name', 'policy', 'delivery_policy',
        'subscriptions', 'purge_subscriptions'))
    check_mode = module.check_mode

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    sns_topic = SnsTopicManager(module,
                                name,
                                state,
                                display_name,
                                policy,
                                delivery_policy,
                                subscriptions,
                                purge_subscriptions,
                                check_mode,
                                region,
                                **aws_connect_params)

    if state == 'present':
        sns_topic.ensure_ok()
    elif state == 'absent':
        sns_topic.ensure_gone()

    sns_facts = dict(changed=sns_topic.changed,
                     sns_arn=sns_topic.arn_topic,
                     sns_topic=sns_topic.get_info())
    module.exit_json(**sns_facts)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
prateeksan/python-design-patterns | structural/flyweight.py | 1 | 6940 | """The Flyweight Pattern
Notes:
If you work for a data science team or have to deal with big data, this pattern
will be particularly useful.
In situations wherein several objects have some common memory intensive state,
the flyweight pattern can be implemented to separate and store the memory
intensive state in a common shared object. In other words if object 'A'
maintains data 'x' and 'y' as state, and object 'B' maintains data 'y' and 'z',
we can create a flyweight object 'C' which stores 'y'. Now 'A' and 'B' do
not need to maintain separate instances of 'y', they can both share the
flyweight's state.
The following implementation uses the pattern to build a module which can handle
complex data requests with multiple queries to a database. Each ComplexRequest
accepts two kinds of queries - fresh data queries (FreshQuery) and historical
data queries (HistoricalQuery). Furthermore, it accepts a pointer to a cache for
historical queries. Assuming that historical data does not change, we have
implemented the HistoricalQuery class as a flyweight and the HistoricalDataCache
serves as shared state cache for the flyweight objects. A user can use this to
make complex queries that only query data which is fresh or previously not
fetched.
"""
class HistoricalDataCache:
    """Flyweight factory for historical query results.

    Maps a short hash of each historical query string to the query object
    holding its result, so that each distinct historical query hits the
    database at most once.
    """

    # Shared hash map: every instance reads and writes the same dict.
    _cache = {}

    def get(self, query_string):
        """Return the cached HistoricalQuery for *query_string*, creating and
        caching it on first request.
        """
        print("Checking cache for: {}".format(query_string))
        key = self._query_hash(query_string)
        cache = self.__class__._cache
        if key in cache:
            print("\t{}".format(
                    "Cached result found. Returning result from cache."
                )
            )
        else:
            print("\t{}".format(
                    "Query result not previously cached. Caching and returning."
                )
            )
            cache[key] = HistoricalQuery(query_string)
        return cache[key]

    def _query_hash(self, query_string):
        """Fold the query string into a non-negative integer of at most 10
        digits, used as its cache key.
        """
        return abs(hash(query_string)) % (10**10)
class Query:
    """Shared interface for fresh and historical queries.

    The FreshQuery and HistoricalQuery subclasses exist only to let the user
    distinguish cacheable queries (or results returned from a cache) from
    those that must always be re-fetched.
    """

    def __init__(self, query_string):
        """Run the query immediately; ``self.data`` holds the memory-intensive
        result that the flyweight machinery manages.
        """
        self.query_string = query_string
        self.data = self.get_data()

    def get_data(self):
        """Simulate the database read, returning a placeholder result string.

        A real implementation would query the database and return a large
        object containing the requisite data.
        """
        class_label = type(self).__name__
        return "Data for {}: {}".format(class_label, self.query_string)
class FreshQuery(Query):
    # Marker subclass: fresh data is always re-queried, never cached.
    pass
class HistoricalQuery(Query):
    # Marker subclass: instances are flyweights shared via HistoricalDataCache.
    pass
class ComplexRequest:
    """End-user facade for building complex data sets from multiple queries.

    Fresh queries always hit the database; historical queries are resolved
    through the shared historical cache. Oversimplified for demonstration.
    """

    def __init__(self, historical_queries, fresh_queries, historical_cache):
        """Split the requested queries by type and keep a pointer to the
        shared historical cache used to resolve the historical ones.
        """
        self.historical_queries = historical_queries
        self.fresh_queries = fresh_queries
        self.historical_cache = historical_cache

    def get(self):
        """Fetch both data sets and 'merge' them (here: just print them).

        A real implementation would join/pivot/group the data sets.
        """
        fresh_data = self._get_fresh_data()
        historical_data = self._get_historical_data()
        print("Merging the following data sets:")
        print("\t" + "\n\t".join(fresh_data))
        print("\t" + "\n\t".join(historical_data))

    def _get_fresh_data(self):
        """Fresh data is always re-queried from the db for each request."""
        return [FreshQuery(query_string).data
                for query_string in self.fresh_queries]

    def _get_historical_data(self):
        """Resolve historical queries through the flyweight cache so that
        previously fetched results are recycled.
        """
        return [self.historical_cache.get(query_string).data
                for query_string in self.historical_queries]
if __name__ == '__main__':
    """In this example, we make two complex requests that share a historical
    query to get all data from the archive_2 table. Therefore this query is only
    sent to the db once even though it is added to both requests.
    """
    # Single shared cache instance; both requests consult the same
    # class-level store of historical results.
    historical_cache = HistoricalDataCache()
    request_1 = ComplexRequest(
        historical_queries=[
            "SELECT * FROM archive_1",
            "SELECT * FROM archive_2"
        ],
        fresh_queries = [
            "SELECT * FROM live_1",
            "SELECT * FROM live_2"
        ],
        historical_cache = historical_cache
    )
    print("> Making request_1...")
    data_1 = request_1.get()  # NOTE(review): get() returns None; data_1 is always None
    print("\n")
    print("> Making request_2...")
    request_2 = ComplexRequest(
        historical_queries=[
            "SELECT * FROM archive_2",  # already cached by request_1 — no db read
            "SELECT * FROM archive_3"
        ],
        fresh_queries = [
            "SELECT * FROM live_1",
            "SELECT * FROM live_2"
        ],
        historical_cache = historical_cache
    )
    data_2 = request_2.get()
| mit |
GaetanCambier/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/firedrive.py | 31 | 2604 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class FiredriveIE(InfoExtractor):
    """Extractor for firedrive.com video pages (both /file/ and /embed/ URLs)."""
    _VALID_URL = r'https?://(?:www\.)?firedrive\.com/' + \
                 '(?:file|embed)/(?P<id>[0-9a-zA-Z]+)'
    # Presence of this marker in the page means the file has been removed.
    _FILE_DELETED_REGEX = r'<div class="removed_file_image">'

    _TESTS = [{
        'url': 'https://www.firedrive.com/file/FEB892FA160EBD01',
        'md5': 'd5d4252f80ebeab4dc2d5ceaed1b7970',
        'info_dict': {
            'id': 'FEB892FA160EBD01',
            'ext': 'flv',
            'title': 'bbb_theora_486kbit.flv',
            'thumbnail': 're:^http://.*\.jpg$',
        },
    }]

    def _real_extract(self, url):
        # Always fetch via the canonical /file/ URL, even for /embed/ links.
        video_id = self._match_id(url)
        url = 'http://firedrive.com/file/%s' % video_id

        webpage = self._download_webpage(url, video_id)

        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)

        # Collect the hidden <input> fields of the confirmation form so the
        # page can be re-requested as a POST (simulating the user clicking
        # through the download confirmation).
        fields = dict(re.findall(r'''(?x)<input\s+
            type="hidden"\s+
            name="([^"]+)"\s+
            value="([^"]*)"
            ''', webpage))

        post = compat_urllib_parse.urlencode(fields)
        req = compat_urllib_request.Request(url, post)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        # Apparently, this header is required for confirmation to work.
        req.add_header('Host', 'www.firedrive.com')

        webpage = self._download_webpage(req, video_id,
                                         'Downloading video page')

        title = self._search_regex(r'class="external_title_left">(.+)</div>',
                                   webpage, 'title')
        thumbnail = self._search_regex(r'image:\s?"(//[^\"]+)', webpage,
                                       'thumbnail', fatal=False)
        if thumbnail is not None:
            # The page uses protocol-relative thumbnail URLs.
            thumbnail = 'http:' + thumbnail

        ext = self._search_regex(r'type:\s?\'([^\']+)\',',
                                 webpage, 'extension', fatal=False)
        video_url = self._search_regex(
            r'file:\s?loadURL\(\'(http[^\']+)\'\),', webpage, 'file url')

        # Only a single (SD) stream is exposed by the site.
        formats = [{
            'format_id': 'sd',
            'url': video_url,
            'ext': ext,
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| gpl-3.0 |
jacquerie/inspire-next | inspirehep/modules/cache/providers/lock.py | 2 | 2301 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Cache for marking a record as locked."""
from __future__ import absolute_import, division, print_function
from invenio_cache import current_cache
class LockCache(object):
    """Lock cache for ``pid_value``"""

    # Namespace prefix prepended to every key stored by this cache.
    key_prefix = 'lock::'

    def __init__(self):
        """Initialize the cache."""

    def _prefix(self, key):
        """Return *key* with the lock namespace applied.

        Args:
            key (str): a key name.

        Returns:
            str: a key with the ``key_prefix`` prefix.
        """
        return '{0}{1}'.format(self.key_prefix, key)

    def get(self, key):
        """Fetch the value stored under the prefixed *key*.

        Args:
            key (str): a key name.

        Returns:
            str: the value of the given key.
        """
        namespaced = self._prefix(key)
        return current_cache.get(namespaced)

    def set(self, key, value, timeout=7200):
        """Store *value* under the prefixed *key*.

        Args:
            key (str): a key name.
            value (str): a value.
            timeout (int): a timeout time in seconds.

        Returns:
            bool: if the key is stored.
        """
        namespaced = self._prefix(key)
        return current_cache.set(namespaced, value, timeout=timeout)

    def delete(self, key):
        """Remove the prefixed *key* from the cache.

        Args:
            key (str): a key name.

        Returns:
            bool: if the key is deleted.
        """
        namespaced = self._prefix(key)
        return current_cache.delete(namespaced)
| gpl-3.0 |
CydarLtd/ansible | lib/ansible/modules/storage/netapp/netapp_e_amg.py | 63 | 10824 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg
short_description: Create, Remove, and Update Asynchronous Mirror Groups
description:
- Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- The name of the async array you wish to target, or create.
- If C(state) is present and the name isn't found, it will attempt to create.
required: yes
secondaryArrayId:
description:
- The ID of the secondary array to be used in mirroing process
required: yes
syncIntervalMinutes:
description:
- The synchronization interval in minutes
required: no
default: 10
manualSync:
description:
- Setting this to true will cause other synchronization values to be ignored
required: no
default: no
recoveryWarnThresholdMinutes:
description:
- Recovery point warning threshold (minutes). The user will be warned when the age of the last good failures point exceeds this value
required: no
default: 20
repoUtilizationWarnThreshold:
description:
- Recovery point warning threshold
required: no
default: 80
interfaceType:
description:
- The intended protocol to use if both Fibre and iSCSI are available.
choices:
- iscsi
- fibre
required: no
default: null
syncWarnThresholdMinutes:
description:
- The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
required: no
default: 10
state:
description:
- A C(state) of present will either create or update the async mirror group.
- A C(state) of absent will remove the async mirror group.
required: yes
"""
EXAMPLES = """
- name: AMG removal
na_eseries_amg:
state: absent
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
- name: AMG create
netapp_e_amg:
state: present
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
"""
RETURN = """
msg:
description: Successful removal
returned: success
type: string
sample: "Async mirror group removed."
msg:
description: Successful creation
returned: success
type: string
sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
""" # NOQA
import json
from ansible.module_utils.basic import AnsibleModule, get_exception
from ansible.module_utils.netapp import request, eseries_host_argument_spec
# Common HTTP headers sent with every request to the E-Series web proxy API.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
def has_match(module, ssid, api_url, api_pwd, api_usr, body):
    """Look for an existing async mirror group whose label equals body['name'].

    Returns a tuple (label_exists, matches_spec, api_data, async_id):
    whether the named group exists, whether its tunable settings already match
    the desired ones, the raw API record (or None), and its groupRef (or None).
    """
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((key, (body.get(key))) for key in compare_keys)
    desired_name = body.get('name')

    url = api_url + 'storage-systems/%s/async-mirrors' % ssid
    try:
        rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
    except Exception:
        error = get_exception()
        module.exit_json(exception="Error finding a match. Message: %s" % str(error))

    label_exists = False
    matches_spec = False
    current_state = None
    async_id = None
    api_data = None

    for async_group in data:
        if async_group['label'] != desired_name:
            continue
        label_exists = True
        api_data = async_group
        async_id = async_group['groupRef']
        # Map the API field names back onto the module's parameter names.
        current_state = dict(
            syncIntervalMinutes=async_group['syncIntervalMinutes'],
            syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
            recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
            repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
        )
        if current_state == desired_state:
            matches_spec = True

    return label_exists, matches_spec, api_data, async_id
def create_async(module, ssid, api_url, api_pwd, api_usr, body):
    """Create a new async mirror group on storage system *ssid*.

    *body* is the dict of remaining module parameters, posted as-is to the
    async-mirrors endpoint. Returns the API response on success; exits the
    module on failure.
    """
    endpoint = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
                           headers=HEADERS)
    except Exception:
        error = get_exception()
        # Bug fix: error message previously misspelled 'async' as 'aysnc'.
        # NOTE(review): exit_json reports success even on error; fail_json may
        # be more appropriate, but the established behavior is preserved.
        module.exit_json(exception="Exception while creating async mirror group. Message: %s" % str(error))
    return data
def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
    """POST updated settings (and optionally a new name) for the async mirror
    group *async_id* on storage system *ssid*; return the API response.
    """
    url = api_url + 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)

    # Only the tunable settings are sent; other body keys are ignored here.
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((key, (body.get(key))) for key in compare_keys)
    if new_name:
        desired_state['new_name'] = new_name

    try:
        rc, data = request(url, data=json.dumps(desired_state), method='POST',
                           headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        error = get_exception()
        module.exit_json(exception="Exception while updating async mirror group. Message: %s" % str(error))

    return data
def remove_amg(module, ssid, api_url, pwd, user, async_id):
    """Delete the async mirror group *async_id* from storage system *ssid*."""
    url = api_url + 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
    try:
        rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
                           headers=HEADERS)
    except Exception:
        error = get_exception()
        module.exit_json(exception="Exception while removing async mirror group. Message: %s" % str(error))
    return
def main():
    """Module entry point: ensure the async mirror group is present or absent."""
    argument_spec = eseries_host_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        new_name=dict(required=False, type='str'),
        secondaryArrayId=dict(required=True, type='str'),
        syncIntervalMinutes=dict(required=False, default=10, type='int'),
        manualSync=dict(required=False, default=False, type='bool'),
        recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
        repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
        interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
        state=dict(required=True, choices=['present', 'absent']),
        syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    # Pop connection details out of the params; what remains in p is the
    # request body handed to has_match/create_async/update_async.
    p = module.params
    ssid, api_url, user, pwd, new_name, state = (
        p.pop(key) for key in
        ('ssid', 'api_url', 'api_username', 'api_password', 'new_name', 'state'))

    if not api_url.endswith('/'):
        api_url += '/'

    name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)

    if state == 'present':
        if name_exists:
            if spec_matches:
                module.exit_json(changed=False, msg="Desired state met", **api_data)
            else:
                results = update_async(module, ssid, api_url, pwd, user,
                                       p, new_name, async_id)
                module.exit_json(changed=True,
                                 msg="Async mirror group updated", async_id=async_id,
                                 **results)
        else:
            results = create_async(module, ssid, api_url, user, pwd, p)
            module.exit_json(changed=True, **results)

    elif state == 'absent':
        if name_exists:
            remove_amg(module, ssid, api_url, pwd, user, async_id)
            module.exit_json(changed=True, msg="Async mirror group removed.",
                             async_id=async_id)
        else:
            module.exit_json(changed=False,
                             msg="Async Mirror group: %s already absent" % p['name'])
if __name__ == '__main__':
main()
| gpl-3.0 |
NREL/OpenWARP | source/openwarpgui/openwarp/services.py | 3 | 30949 | # -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
Updated since version 1.3: OpenWarp - Add Logging Functionality
1. Added support for logging
Changes in version 1.4 (OPENWARP - FIX WAVE FREQUENCY AND DIRECTION CRASH BUG):
1. Changed the way we capture the output of the child process started.
This is solving the fact that output from child process get lost sometimes.
2. During the simulation, we don't run the solver if they was an error in the
preprocessing step.
Changes in version 1.5 (OPENWARP - PROVIDE A COMMAND LINE INTERFACE USING PYTHON):
Added some functions to reuse them in the cli interface.
"""
__author__ = "caoweiquan322, yedtoss, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2016 TopCoder Inc. All rights reserved."
__version__ = "1.5"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
try:
from capturer import CaptureOutput
except ImportError,e:
# failed to import experimental pty support
import contextlib
pass
import fnmatch
import h5py
import json
import contextlib
#
# This class represents parameters used in the meshing process.
# It is a subclass of "tuple", created with the collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
                                           'infile outfile maxh minh fineness grading usetolerance tolerance')

# This class represents parameters used in configuring the application
# (logging level and whether to clear existing logs).
ConfigurationParameters = collections.namedtuple('ConfigurationParameters',
                                                 'logging_level clear_log_flag')

# This class represents parameters used in the simulation process
# (fluid properties, frequency/direction ranges, bodies and solver settings).
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
                                              'min_wave_frequencies max_wave_frequencies wave_directions ' +
                                              'max_wave_direction min_wave_directions floating_bodies ' +
                                              'indiq_solver ires tol_gmres max_iterations save_potential ' +
                                              'green_tabulation_numx green_tabulation_numz ' +
                                              'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
                                              'use_higher_order num_panel_higher_order b_spline_order ' +
                                              'use_dipoles_implementation thin_panels compute_drift_forces ' +
                                              'compute_yaw_moment remove_irregular_frequencies')

# This class represents a floating body used in the SimulationParameters.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
                                      'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
                                      'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
                                      'force_in_z_direction moment_cdg_force_in_x_direction ' +
                                      'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
                                      'additional_info_lines')

# This class represents parameters used in the post-processing.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
                                                  'kochin_function free_surface_elevation')

# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'

# The pre-defined stdout log file name for the mesh generator subprocess.
_LOG_FILE_NAME = 'log.txt'
class ServiceError(Exception):
    '''
    This exception indicates a service error.

    It is raised by the service functions of this module (meshing,
    configuration, simulation) when an underlying operation fails.
    '''
    pass
def _set_log_level(log_level):
"""
helper method to change level of logs depending of log_level
:param log_level: integer or string (10 for DEBUG, 20 for INFO)
:return: A message indicating success or not
"""
# setting logging level:
if log_level and str(log_level) in ["10", "20"]:
# Setting the root logger to that level
level = int(log_level)
logging.getLogger(__name__).info("Setting logging level to " +
logging.getLevelName(level))
logging.getLogger().setLevel(level)
out = "Logging successfully set to level " + logging.getLevelName(level)
logging.getLogger(__name__).info(out)
return out
else:
# nofifying user and setting up loggers to debug by default
logger = logging.getLogger(__name__)
out = ("Logging level unknown! Should be 10 (DEBUG) or 20 (INFO). Keeping current level of " +
logging.getLevelName(logging.getLogger().getEffectiveLevel()))
logger.warning(out)
return out
def _clear_log():
    """Delete the numbered log files stored under ``LOG_FILE``.

    Log files are expected to be named ``<LOG_FILE>/1``, ``<LOG_FILE>/2``, ...
    consecutively; deletion stops at the first missing file or after
    ``MAX_NUM`` files.

    :return: the number of log files that were removed.
    """
    # Safety bound so a pathological directory cannot make us loop forever.
    MAX_NUM = 10000
    num = 1
    # Restructured from 'while True' (the original had an unreachable trailing
    # return after the infinite loop); behavior is unchanged.
    while num <= MAX_NUM:
        path = os.path.join(LOG_FILE, str(num))
        if not os.path.exists(path):
            break
        os.remove(path)
        num += 1
    return num - 1
def apply_configuration(params):
    """Apply UI configuration: adjust the logging level and optionally clear logs.

    :param params: a ConfigurationParameters instance with fields
        ``logging_level`` and ``clear_log_flag`` (string "true"/"false").
    :return: a report string describing the actions performed.
    :raise TypeError: if params is not a ConfigurationParameters.
    :raise ValueError: if a required field is None or empty.
    :raise ServiceError: if applying the configuration fails.
    """
    # The logger object for logging.
    logger = logging.getLogger(__name__)
    signature = __name__ + '.apply_configuration(params)'
    helper.log_entrance(logger, signature,
                        {'params': params})

    # Checking parameters
    helper.check_type_value(params, 'params', ConfigurationParameters, False)
    helper.check_not_none_nor_empty(params.logging_level, 'params.logging_level')
    helper.check_not_none_nor_empty(params.clear_log_flag, 'params.clear_log_flag')

    try:
        output = "\n" + _set_log_level(params.logging_level)

        if params.clear_log_flag == "true":
            num = _clear_log()
            out = str(num) + " logs files were successfully cleared"
            logging.getLogger(__name__).info(out)
            output += "\n" + out
        else:
            out = "clearing logs were not enabled"
            output += "\n" + out
            logging.getLogger(__name__).info(out)

        output += ("\n\n" + "The simulation and running time logs are saved in " +
                   LOG_FILE)
        helper.log_exit(logger, signature, [output])
        return output
    except Exception as e:
        # No logging when raising an exception. Another function will log it.
        # Bug fix: the message previously said "generating mesh" (copy-paste
        # from generate_mesh); it now names the failing operation.
        raise ServiceError('Error occurs when applying configuration. Caused by:\n' + unicode(str(e)))
def generate_mesh(meshing_dir, params):
    '''
    Launch Mesh Generator to generate mesh.
    @param meshing_dir: the meshing directory
    @param params: the meshing parameters
    @return: the mesh generation log content
    @raise TypeError: if any input parameter is not of required type
    @raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
    of valid value
    @raise ServiceError: if error occurs during generating mesh
    '''
    # The logger object for logging.
    logger = logging.getLogger(__name__)
    signature = __name__ + '.generate_mesh()'
    helper.log_entrance(logger, signature,
                        {'meshing_dir': meshing_dir,
                         'params': params})
    # Checking parameters
    helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
    helper.check_is_directory(meshing_dir, 'meshing_dir')
    helper.check_type_value(params, 'params', MeshingParameters, False)
    helper.check_not_none_nor_empty(params.infile, 'params.infile')
    helper.check_is_file(params.infile, 'params.infile')
    helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
    helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
    helper.check_not_none_nor_empty(params.minh, 'params.minh')
    helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
    helper.check_not_none_nor_empty(params.grading, 'params.grading')
    helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
    # tolerance is only mandatory when tolerance usage is switched on ('1')
    if params.usetolerance == '1':
        helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
    try:
        config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
        log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
        # Generate config.txt according to given parameters:
        # one "name: value" line per non-None attribute of params.
        with open(config_file_path, 'w') as f:
            f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
        # Launch mesh generator; blocks until the subprocess exits and
        # redirects its stdout into the log file.
        with open(log_file_path, 'w') as log_file:
            logger.debug('Start mesh generator in subprocess.')
            subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
            logger.debug('End mesh generator in subprocess.')
        # Read and return the log file content as a list of lines
        with open(log_file_path, 'r') as log_file:
            ret = log_file.read().splitlines()
            helper.log_exit(logger, signature, [ret])
            return ret
    except Exception as e:
        helper.log_exception(logger, signature, e)
        raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params, queue):
    '''
    Run simulation.
    @param simulation_dir: the simulation directory
    @param params: the simulation parameters
    @param queue: the queue used to communicate with the spawned worker processes
    @return: the simulation log content
    @raise TypeError: if any input parameter is not of required type
    @raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
    of valid value
    @raise ServiceError: if any other error occurred when launching the simulation
    '''
    # The logger object for logging.
    logger = logging.getLogger(__name__)
    signature = __name__ + '.simulate()'
    helper.log_entrance(logger, signature,
                        {'simulation_dir': simulation_dir,
                         'params': params})
    # Checking parameters
    helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
    helper.check_is_directory(simulation_dir, 'simulation_dir')
    helper.check_type_value(params, 'params', SimulationParameters, False)
    # Environment and wave setup parameters
    helper.check_not_none_nor_empty(params.rho, 'params.rho')
    helper.check_not_none_nor_empty(params.g, 'params.g')
    helper.check_not_none_nor_empty(params.depth, 'params.depth')
    helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
    helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
    helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
    helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
    helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
    helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
    helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
    # NOTE(review): singular "max_wave_direction" vs plural "min_wave_directions"
    # mirrors the SimulationParameters field names as-is — confirm upstream.
    helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
    # Solver options
    helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
    helper.check_not_none_nor_empty(params.ires, 'params.ires')
    helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
    helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
    helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
    helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
    helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
    helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
    helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
    helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
    helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
    helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
    helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
    helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
    helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
    helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
    helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
    # Floating bodies are optional, but each provided body must be complete
    helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
    if params.floating_bodies is not None:
        for body in params.floating_bodies:
            helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
            helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
            helper.check_not_none_nor_empty(body.points, 'body.points')
            helper.check_not_none_nor_empty(body.panels, 'body.panels')
            helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
            helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
            helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
    try:
        # Write the hdf5 inputs according to given parameters
        # Bug solving in old h5py version: Creating the file first
        hdf5_path = os.path.join(simulation_dir, 'db.hdf5')
        utility.touch(hdf5_path)
        with h5py.File(hdf5_path, "a") as hdf5_data:
            utility.write_calculations(params, hdf5_data)
        # Launch preProcessor and Solver
        # A prepared 'results' folder is necessary for the Nemoh software suite
        utility.mkdir_p(os.path.join(simulation_dir, 'results'))
        simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
        # Numeric/boolean solver options are converted from their string
        # form here (e.g. "1" -> True via bool(int(...))).
        custom_config = {
            'HDF5_FILE': hdf5_path,
            'NEMOH_CALCULATIONS_FILE': None,
            'NEMOH_INPUT_FILE': None,
            'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
            'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
            'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
            'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
            'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
            'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
            'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
            'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
            'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
            'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
            'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
            'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
            'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
            'B_SPLINE_ORDER': int(params.b_spline_order),
            'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
            'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
            'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
            'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
            'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
        }
        # Run the preprocessor in a child process; its output is appended
        # to the simulation log file and collected for the caller.
        logger.debug('Start preProcessor function.')
        ret = run_thread(preprocessor.run_as_process, (custom_config, queue), simulation_log_path)
        output = ret["log"]
        if ret["exitcode"] != 0:
            logger.error('An error happened when running the preprocessor. The exit code is ' + str(ret["exitcode"]))
        else:
            logger.debug('Preprocessor successfully run')
        logger.debug('End preProcessor function.')
        # The solver is launched even if the preprocessor reported a
        # non-zero exit code; both logs are concatenated in the output.
        logger.debug('Start solver function.')
        ret = run_thread(solver.run_as_process, (custom_config, queue), simulation_log_path)
        output += ret["log"]
        if ret["exitcode"] != 0:
            logger.error('An error happened when running the solver. The exit code is ' + str(ret["exitcode"]))
        else:
            logger.debug('Solver successfully run')
        logger.debug('End solver function.')
        helper.log_exit(logger, signature, output)
        return output
    except Exception as e:
        helper.log_exception(logger, signature, e)
        raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params, queue):
    '''
    Run post-processing.
    @param simulation_dir: the simulation directory
    @param params: the post-processing parameters
    @param queue: the queue used to communicate with the spawned worker process
    @return: the post-processing log content
    @raise TypeError: if any input parameter is not of required type
    @raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
    of valid value
    @raise ServiceError: if error occurs during launching the post-processing
    '''
    # The logger object for logging.
    logger = logging.getLogger(__name__)
    signature = __name__ + '.postprocess()'
    helper.log_entrance(logger, signature,
                        {'simulation_dir': simulation_dir,
                         'params': params})
    # Checking parameters
    helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
    helper.check_is_directory(simulation_dir, 'simulation_dir')
    helper.check_type_value(params, 'params', PostprocessingParameters, False)
    helper.check_type_value(params.irf, 'params.irf', list, False)
    for irf_item in params.irf:
        helper.check_not_none_nor_empty(irf_item, 'irf_item')
    helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
    helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
    for kochin_function_item in params.kochin_function:
        helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
    helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
    for elevation_item in params.free_surface_elevation:
        helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
    try:
        # Append the post-processing section to the existing simulation database
        with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
            utility.write_postprocessing_section(params, hdf5_data)
        # Launch postProcessor
        postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
        # NOTE(review): unlike simulate(), the numeric/boolean entries below
        # are fixed defaults rather than user parameters — presumably unused
        # by the postprocessor; confirm against postprocessor.run_as_process.
        custom_config = {
            'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
            'NEMOH_CALCULATIONS_FILE': None,
            'NEMOH_INPUT_FILE': None,
            'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
            'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
            'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
            'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
            'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
            'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
            'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
            'GREEN_TABULATION_NUMX' : 328,
            'GREEN_TABULATION_NUMZ' : 46,
            'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
            'USE_ODE_INFLUENCE_COEFFICIENTS': False,
            'USE_HIGHER_ORDER' : False,
            'NUM_PANEL_HIGHER_ORDER' : 1,
            'B_SPLINE_ORDER': 1,
            'USE_DIPOLES_IMPLEMENTATION': False,
            'THIN_PANELS': [-1],
            'COMPUTE_DRIFT_FORCES' : False,
            'COMPUTE_YAW_MOMENT': False,
            'REMOVE_IRREGULAR_FREQUENCIES' : False
        }
        # Run the postprocessor in a child process and collect its output
        logger.debug('Start postProcessor function.')
        ret = run_thread(postprocessor.run_as_process, (custom_config, queue), postprocessing_log_path)
        if ret["exitcode"] != 0:
            logger.error('An error happened when running the postprocessor. The exit code is ' + str(ret["exitcode"]))
        else:
            logger.debug('postProcessor successfully run')
        logger.debug('End postProcessor in subprocess.')
        helper.log_exit(logger, signature, ret["log"])
        return ret["log"]
    except Exception as e:
        helper.log_exception(logger, signature, e)
        raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
    '''
    Open ParaView on the simulation results for visualization.
    @param simulation_dir: the simulation directory
    @raise TypeError: if any input parameter is not of required type
    @raise ValueError: if any input parameter is None/empty
    @raise ServiceError: if error occurs during launching the ParaView
    '''
    # The logger object for logging.
    logger = logging.getLogger(__name__)
    signature = __name__ + '.visualize()'
    helper.log_entrance(logger, signature, {'simulation_dir': simulation_dir})
    # Checking parameters
    helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
    helper.check_is_directory(simulation_dir, 'simulation_dir')
    try:
        # Collect result files whose extension ParaView can display
        results_dir = os.path.join(simulation_dir, 'results')
        data_files = [os.path.join(results_dir, name)
                      for name in os.listdir(results_dir)
                      if any(fnmatch.fnmatch(name, '*.' + ext)
                             for ext in VISUALIZATION_FILE_EXTENSIONS)]
        # Check if there's tec/vtk/stl file to visualize
        if not data_files:
            raise ServiceError('There is no accepted file to visualize.')
        logger.debug('List of files to load:')
        logger.debug(str(data_files))
        # Generate the ParaView loader script from its template
        paraview_script = os.path.join(results_dir, 'load_data.py')
        prepare_paraview_script(paraview_script, data_files)
        # Fire-and-forget: do not wait for ParaView to exit
        logger.debug('Start launching ParaView in subprocess.')
        subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])
        logger.debug('End launching ParaView in subprocess.')
        helper.log_exit(logger, signature, None)
    except Exception as e:
        helper.log_exception(logger, signature, e)
        raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
    '''
    Create the ParaView loader script from the template, substituting the
    data-file list into the placeholder.
    @param script_path: path of the new script to create
    @param files: a list of data files path
    @raise Exception: to its caller if any error occurs
    '''
    # Inner helper: no entrance/exit logging here.
    placeholder = '<parameter_files>'
    with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as template:
        template_lines = template.readlines()
    with open(script_path, 'w') as out:
        for template_line in template_lines:
            out.write(template_line.rstrip().replace(placeholder, str(files)) + '\n')
def wrapper_io(func, fd, args, return_dict):
    """
    Run a function, optionally redirecting its output to a file descriptor.
    Args:
        func: A python function to run
        fd: a file descriptor (falsy to run without redirection)
        args: A tuple containing argument for the function
        return_dict: Dictionary where to put the result of the function
    """
    # Seed the slot so callers always find an "output" key even on failure.
    return_dict["output"] = ''
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if not fd:
            return_dict["output"] = func(*args)
        else:
            # Redirect stdout to fd, discard stderr, appending mode.
            with Silence(stdout=fd, stderr=os.devnull, mode='a'):
                return_dict["output"] = func(*args)
def run_thread(func, args, log_path):
    """
    Run a python function in a child process, wait for it to complete and
    capture its output.
    Args:
        func: A python function to run
        args: A tuple containing argument for the function
        log_path: path of the log file to append the captured output to
            (None to skip writing a log file)
    Returns:
        dict with "exitcode" (the child process exit code) and "log"
        (the captured output, or a pointer message on Windows)
    """
    if os.name =='nt':
        # CaputureOutput was throwing errors in Windows, so using following methods
        with capture() as out:
            p=Process(target=func, args=args)
            p.daemon = True
            p.start()
            p.join()
            output=out
        # NOTE(review): capture() mutates `out` in place in its finally
        # block, so after the with-statement `output` holds the captured
        # strings, not StringIO objects — str(output) writes their repr.
        if log_path is not None:
            with open(log_path, 'a') as log_file:
                log_file.write(str(output))
        # The backslashes are literal Windows path separators ("\l" is not
        # a recognized escape sequence, so Python keeps it verbatim).
        logs_path = os.path.dirname(USER_DATA_DIRECTORY)+"\logs\logs.log"
        return {"exitcode": p.exitcode, "log": "Logs for %s is at : %s \n \n" % ( func.__module__ , logs_path )}
    # Non-Windows path: capture the child's output line by line.
    with CaptureOutput() as capturer:
        p = Process(target=func, args=args)
        p.daemon = True
        p.start()
        p.join()
        output = capturer.get_lines()
    if log_path is not None:
        with open(log_path, 'a') as log_file:
            log_file.write(capturer.get_text())
    return {"exitcode": p.exitcode, "log": output}
@contextlib.contextmanager
def capture():
    """
    Context manager that temporarily redirects sys.stdout/sys.stderr into
    in-memory buffers.

    Yields a two-element list [stdout_buffer, stderr_buffer]; on exit the
    list entries are replaced IN PLACE with the captured text, so callers
    that kept a reference to the yielded list see plain strings afterwards.
    """
    import sys
    # cStringIO is Python 2 only; this module targets Python 2 (see the
    # `unicode`/`basestring` usage elsewhere in the file).
    from cStringIO import StringIO
    oldout,olderr = sys.stdout, sys.stderr
    try:
        out=[StringIO(), StringIO()]
        sys.stdout,sys.stderr = out
        yield out
    finally:
        # Always restore the real streams, then swap the buffers for their
        # accumulated text (in-place list mutation, see docstring).
        sys.stdout,sys.stderr = oldout,olderr
        out[0] = out[0].getvalue()
        out[1] = out[1].getvalue()
def writeline_if_not_none(fout, data):
    '''
    Write one line to the specified file if data is not None.
    @param fout: the file object to write line in
    @param data: the data to write as line
    '''
    # Inner helper: no entrance/exit logging here.
    if data is None:
        return
    fout.write(str(data) + '\n')
def prepare_dir(prefix):
    '''
    Create and return a fresh working directory under USER_DATA_DIRECTORY.
    The sub-directory name is the given prefix followed by the current
    timestamp and a uuid suffix.
    @param prefix: the directory prefix
    @return: the meshing/simulation directory full path
    @raise TypeError: if any input parameter is not of required type
    @raise ValueError: if any input parameter is None/empty
    @raise ServiceError: if any error occurred when preparing the directory
    '''
    # The logger object for logging.
    logger = logging.getLogger(__name__)
    signature = __name__ + '.prepare_dir()'
    helper.log_entrance(logger, signature, {'prefix': prefix})
    # Checking parameters
    helper.check_not_none_nor_empty(prefix, 'prefix')
    try:
        # Name format: <prefix><YYYYMMDDhhmmss>_<uuid-hex>; the uuid part
        # keeps concurrent requests within the same second unique.
        timestamp = time.strftime('%Y%m%d%H%M%S')
        unique_suffix = uuid.uuid1().hex
        target_dir = os.path.join(USER_DATA_DIRECTORY,
                                  prefix + timestamp + '_' + unique_suffix)
        os.makedirs(target_dir)
        helper.log_exit(logger, signature, [target_dir])
        return target_dir
    except Exception as e:
        helper.log_exception(logger, signature, e)
        raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
def construct_postprocess_parameters(json_str):
    '''
    Construct the post-processing parameters from json string or object.
    @param json_str: the json string or object to parse
    @return: the parsed PostprocessingParameters object
    '''
    # Since this is a internal method. The parameters won't be logged.
    json_obj = json_str
    if isinstance(json_obj, basestring):
        # json.loads is the idiomatic equivalent of json.JSONDecoder().decode
        json_obj = json.loads(json_str)
    para = PostprocessingParameters(**json_obj)
    return para
def construct_simulation_parameters(json_str):
    '''
    Construct the simulation parameters from json string or object.
    @param json_str: the json string or object to parse
    @return: the parsed SimulationParameters object
    '''
    # Since this is a internal method. The parameters won't be logged.
    json_obj = json_str
    if isinstance(json_obj, basestring):
        # json.loads is the idiomatic equivalent of json.JSONDecoder().decode
        json_obj = json.loads(json_str)
    para = SimulationParameters(**json_obj)
    if para.floating_bodies is not None:
        # Re-hydrate each plain dict into a FloatingBody, mutating the
        # existing list in place so `para` keeps the same list object.
        new_bodies = [FloatingBody(**body) for body in para.floating_bodies]
        del para.floating_bodies[:]
        para.floating_bodies.extend(new_bodies)
    return para
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import calendar
import datetime
import json
import uuid
import math
from collections import OrderedDict
from decimal import *
import logging
from datetime import timedelta
import six
from aenum import Enum
from isodate import parse_duration, duration_isoformat
from gremlin_python import statics
from gremlin_python.statics import FloatType, FunctionType, IntType, LongType, TypeType, DictType, ListType, SetType, SingleByte, ByteBufferType, SingleChar
from gremlin_python.process.traversal import Binding, Bytecode, Direction, P, TextP, Traversal, Traverser, TraversalStrategy, T
from gremlin_python.structure.graph import Edge, Property, Vertex, VertexProperty, Path
log = logging.getLogger(__name__)
# When we fall back to a superclass's serializer, we iterate over this map.
# We want that iteration order to be consistent, so we use an OrderedDict,
# not a dict.
_serializers = OrderedDict()
# GraphSON type tag (e.g. "g:Vertex") -> deserializer class. Lookup here is
# always by exact key, so plain dict ordering is fine.
_deserializers = {}
class GraphSONTypeType(type):
    """Metaclass that auto-registers serializer classes.

    Every non-underscore-prefixed class created with this metaclass is
    added to the module-level registries, keyed by its python_type and/or
    graphson_type attribute when that attribute is truthy.
    """
    def __new__(mcs, name, bases, dct):
        cls = super(GraphSONTypeType, mcs).__new__(mcs, name, bases, dct)
        is_registrable = not name.startswith('_')
        if is_registrable:
            if cls.python_type:
                _serializers[cls.python_type] = cls
            if cls.graphson_type:
                _deserializers[cls.graphson_type] = cls
        return cls
class GraphSONUtil(object):
    """Helpers for building GraphSON 3.0 type-tagged envelopes."""
    TYPE_KEY = "@type"
    VALUE_KEY = "@value"

    @classmethod
    def typedValue(cls, type_name, value, prefix="g"):
        """Wrap value as {"@type": "<prefix>:<type_name>", "@value": value}.

        A None value yields a type-only envelope (no "@value" key).
        """
        envelope = {cls.TYPE_KEY: cls.formatType(prefix, type_name)}
        if value is not None:
            envelope[cls.VALUE_KEY] = value
        return envelope

    @classmethod
    def formatType(cls, prefix, type_name):
        """Join a namespace prefix and type name, e.g. ("g", "Int32") -> "g:Int32"."""
        return "%s:%s" % (prefix, type_name)
# Read/Write classes split to follow precedence of the Java API
class GraphSONWriter(object):
    """Serializes python objects into GraphSON 3.0 JSON."""

    def __init__(self, serializer_map=None):
        """
        :param serializer_map: map from Python type to serializer instance implementing `dictify`
        """
        self.serializers = _serializers.copy()
        if serializer_map:
            self.serializers.update(serializer_map)

    def writeObject(self, objectData):
        """Serialize objectData to a compact GraphSON JSON string."""
        return json.dumps(self.toDict(objectData), separators=(',', ':'))

    def toDict(self, obj):
        """Encode a python object as GraphSON type-tagged dict values."""
        # Fast path: a serializer registered for the exact type.
        try:
            return self.serializers[type(obj)].dictify(obj, self)
        except KeyError:
            pass
        # Fall back to the first registered superclass; the registry is an
        # OrderedDict so this iteration order is deterministic.
        for registered_type, serializer in self.serializers.items():
            if isinstance(obj, registered_type):
                return serializer.dictify(obj, self)
        # Untyped containers are converted recursively, scalars pass through.
        if isinstance(obj, dict):
            return dict((self.toDict(k), self.toDict(v)) for k, v in obj.items())
        if isinstance(obj, set):
            return set(self.toDict(member) for member in obj)
        if isinstance(obj, list):
            return [self.toDict(member) for member in obj]
        return obj
class GraphSONReader(object):
    """Deserializes GraphSON 3.0 JSON into python objects."""

    def __init__(self, deserializer_map=None):
        """
        :param deserializer_map: map from GraphSON type tag to deserializer instance implementing `objectify`
        """
        self.deserializers = _deserializers.copy()
        if deserializer_map:
            self.deserializers.update(deserializer_map)

    def readObject(self, jsonData):
        """Parse a GraphSON JSON string into python objects."""
        return self.toObject(json.loads(jsonData))

    def toObject(self, obj):
        """Unpack GraphSON type-tagged dict values via self.deserializers."""
        if isinstance(obj, dict):
            # A dict carrying a known "@type" tag is delegated to its
            # registered deserializer; anything else is treated as a map.
            try:
                deserializer = self.deserializers[obj[GraphSONUtil.TYPE_KEY]]
                return deserializer.objectify(obj[GraphSONUtil.VALUE_KEY], self)
            except KeyError:
                pass
            return dict((self.toObject(k), self.toObject(v)) for k, v in obj.items())
        if isinstance(obj, set):
            return set(self.toObject(member) for member in obj)
        if isinstance(obj, list):
            return [self.toObject(member) for member in obj]
        return obj
@six.add_metaclass(GraphSONTypeType)
class _GraphSONTypeIO(object):
    """Base class for GraphSON serializers/deserializers.

    Subclasses are auto-registered by the GraphSONTypeType metaclass via
    their python_type / graphson_type attributes; the leading underscore
    keeps this base class itself out of the registries.
    """
    python_type = None
    graphson_type = None
    # Python reserved words cannot be used as identifiers, so the traversal
    # API mangles them with a trailing underscore; this maps each mangled
    # name back to the Gremlin symbol expected on the wire.
    symbolMap = {"global_": "global", "as_": "as", "in_": "in", "and_": "and",
                 "or_": "or", "is_": "is", "not_": "not", "from_": "from",
                 "set_": "set", "list_": "list", "all_": "all", "with_": "with",
                 "filter_": "filter", "id_": "id", "max_": "max", "min_": "min", "sum_": "sum"}

    @classmethod
    def unmangleKeyword(cls, symbol):
        # Return the unmangled Gremlin symbol, or the symbol unchanged.
        return cls.symbolMap.get(symbol, symbol)

    def dictify(self, obj, writer):
        raise NotImplementedError()

    def objectify(self, d, reader):
        raise NotImplementedError()
class _BytecodeSerializer(_GraphSONTypeIO):
    """Shared serialization logic for Bytecode and Traversal."""

    @classmethod
    def _dictify_instructions(cls, instructions, writer):
        # Each instruction is (operator, arg0, arg1, ...); the operator name
        # is kept verbatim while every argument is recursively dictified.
        out = []
        for instruction in instructions:
            inst = [instruction[0]]
            inst.extend(writer.toDict(arg) for arg in instruction[1:])
            out.append(inst)
        return out

    @classmethod
    def dictify(cls, bytecode, writer):
        # A Traversal is serialized through its underlying bytecode.
        if isinstance(bytecode, Traversal):
            bytecode = bytecode.bytecode
        out = {}
        # Empty instruction lists are omitted from the payload entirely.
        if bytecode.source_instructions:
            out["source"] = cls._dictify_instructions(bytecode.source_instructions, writer)
        if bytecode.step_instructions:
            out["step"] = cls._dictify_instructions(bytecode.step_instructions, writer)
        return GraphSONUtil.typedValue("Bytecode", out)
class TraversalSerializer(_BytecodeSerializer):
    # A Traversal serializes through its bytecode (see _BytecodeSerializer).
    python_type = Traversal
class BytecodeSerializer(_BytecodeSerializer):
    # Registers the shared bytecode serialization for raw Bytecode objects.
    python_type = Bytecode
class VertexSerializer(_GraphSONTypeIO):
    python_type = Vertex
    graphson_type = "g:Vertex"

    @classmethod
    def dictify(cls, vertex, writer):
        # Only id and label are emitted; vertex properties are not
        # serialized by this writer.
        return GraphSONUtil.typedValue("Vertex", {"id": writer.toDict(vertex.id),
                                                  "label": writer.toDict(vertex.label)})
class EdgeSerializer(_GraphSONTypeIO):
    python_type = Edge
    graphson_type = "g:Edge"

    @classmethod
    def dictify(cls, edge, writer):
        # Endpoints are serialized by reference (id + label only), matching
        # the GraphSON 3.0 edge envelope.
        return GraphSONUtil.typedValue("Edge", {"id": writer.toDict(edge.id),
                                                "outV": writer.toDict(edge.outV.id),
                                                "outVLabel": writer.toDict(edge.outV.label),
                                                "label": writer.toDict(edge.label),
                                                "inV": writer.toDict(edge.inV.id),
                                                "inVLabel": writer.toDict(edge.inV.label)})
class VertexPropertySerializer(_GraphSONTypeIO):
    python_type = VertexProperty
    graphson_type = "g:VertexProperty"

    @classmethod
    def dictify(cls, vertex_property, writer):
        # The owning vertex is referenced by its id only.
        return GraphSONUtil.typedValue("VertexProperty", {"id": writer.toDict(vertex_property.id),
                                                          "label": writer.toDict(vertex_property.label),
                                                          "value": writer.toDict(vertex_property.value),
                                                          "vertex": writer.toDict(vertex_property.vertex.id)})
class PropertySerializer(_GraphSONTypeIO):
    python_type = Property
    graphson_type = "g:Property"

    @classmethod
    def dictify(cls, property, writer):
        elementDict = writer.toDict(property.element)
        if elementDict is not None:
            # Trim the dictified element down to a bare reference: endpoint
            # labels, nested properties and values are redundant inside a
            # Property envelope. Note this mutates the "@value" dict in place.
            valueDict = elementDict["@value"]
            if "outVLabel" in valueDict:
                del valueDict["outVLabel"]
            if "inVLabel" in valueDict:
                del valueDict["inVLabel"]
            if "properties" in valueDict:
                del valueDict["properties"]
            if "value" in valueDict:
                del valueDict["value"]
        return GraphSONUtil.typedValue("Property", {"key": writer.toDict(property.key),
                                                    "value": writer.toDict(property.value),
                                                    "element": elementDict})
class TraversalStrategySerializer(_GraphSONTypeIO):
    python_type = TraversalStrategy

    @classmethod
    def dictify(cls, strategy, writer):
        """Serialize a strategy as its name tagging its dictified configuration."""
        configuration = dict(
            (key, writer.toDict(strategy.configuration[key]))
            for key in strategy.configuration
        )
        return GraphSONUtil.typedValue(strategy.strategy_name, configuration)
class TraverserIO(_GraphSONTypeIO):
    python_type = Traverser
    graphson_type = "g:Traverser"

    @classmethod
    def dictify(cls, traverser, writer):
        # A traverser is its payload object plus a bulk (repetition) count.
        return GraphSONUtil.typedValue("Traverser", {"value": writer.toDict(traverser.object),
                                                     "bulk": writer.toDict(traverser.bulk)})

    @classmethod
    def objectify(cls, d, reader):
        return Traverser(reader.toObject(d["value"]),
                         reader.toObject(d["bulk"]))
class EnumSerializer(_GraphSONTypeIO):
    python_type = Enum

    @classmethod
    def dictify(cls, enum, _):
        # Both the enum's type name and member name may be keyword-mangled
        # (e.g. "in_"), so unmangle each before emitting.
        return GraphSONUtil.typedValue(cls.unmangleKeyword(type(enum).__name__),
                                       cls.unmangleKeyword(str(enum.name)))
class PSerializer(_GraphSONTypeIO):
    python_type = P

    @classmethod
    def dictify(cls, p, writer):
        # Binary predicates (p.other set) serialize both operands as a list;
        # unary predicates serialize the single value directly.
        out = {"predicate": p.operator,
               "value": [writer.toDict(p.value), writer.toDict(p.other)] if p.other is not None else
               writer.toDict(p.value)}
        return GraphSONUtil.typedValue("P", out)
class TextPSerializer(_GraphSONTypeIO):
    python_type = TextP

    @classmethod
    def dictify(cls, p, writer):
        # Mirrors PSerializer, but emits the text-predicate type tag.
        out = {"predicate": p.operator,
               "value": [writer.toDict(p.value), writer.toDict(p.other)] if p.other is not None else
               writer.toDict(p.value)}
        return GraphSONUtil.typedValue("TextP", out)
class BindingSerializer(_GraphSONTypeIO):
    python_type = Binding

    @classmethod
    def dictify(cls, binding, writer):
        # A binding pairs a variable name (kept verbatim) with its value.
        out = {"key": binding.key,
               "value": writer.toDict(binding.value)}
        return GraphSONUtil.typedValue("Binding", out)
class LambdaSerializer(_GraphSONTypeIO):
    python_type = FunctionType

    @classmethod
    def dictify(cls, lambda_object, writer):
        # The user-supplied callable is invoked once; it returns either the
        # script string or a (script, language) tuple.
        lambda_result = lambda_object()
        script = lambda_result if isinstance(lambda_result, str) else lambda_result[0]
        language = statics.default_lambda_language if isinstance(lambda_result, str) else lambda_result[1]
        out = {"script": script,
               "language": language}
        if language == "gremlin-groovy" and "->" in script:
            # if the user has explicitly added parameters to the groovy closure then we can easily detect one or two
            # arg lambdas - if we can't detect 1 or 2 then we just go with "unknown"
            args = script[0:script.find("->")]
            out["arguments"] = 2 if "," in args else 1
        else:
            # -1 signals "unknown arity" to the server
            out["arguments"] = -1
        return GraphSONUtil.typedValue("Lambda", out)
class TypeSerializer(_GraphSONTypeIO):
    python_type = TypeType

    @classmethod
    def dictify(cls, typ, writer):
        # A type is serialized by instantiating it with no arguments and
        # serializing the resulting instance.
        return writer.toDict(typ())
class UUIDIO(_GraphSONTypeIO):
    python_type = uuid.UUID
    graphson_type = "g:UUID"
    graphson_base_type = "UUID"

    @classmethod
    def dictify(cls, obj, writer):
        # UUIDs travel as their canonical hyphenated string form.
        return GraphSONUtil.typedValue(cls.graphson_base_type, str(obj))

    @classmethod
    def objectify(cls, d, reader):
        return cls.python_type(d)
class DateIO(_GraphSONTypeIO):
    python_type = datetime.datetime
    graphson_type = "g:Date"
    graphson_base_type = "Date"

    @classmethod
    def dictify(cls, obj, writer):
        try:
            # datetime path: UTC epoch milliseconds including microseconds
            timestamp_seconds = calendar.timegm(obj.utctimetuple())
            pts = timestamp_seconds * 1e3 + getattr(obj, 'microsecond', 0) / 1e3
        except AttributeError:
            # date path (no utctimetuple): midnight of that day, milliseconds
            pts = calendar.timegm(obj.timetuple()) * 1e3
        ts = int(round(pts))
        return GraphSONUtil.typedValue(cls.graphson_base_type, ts)

    @classmethod
    def objectify(cls, ts, reader):
        # Python timestamp expects seconds; the wire carries milliseconds
        return datetime.datetime.utcfromtimestamp(ts / 1000.0)
# Based on current implementation, this class must always be declared before FloatIO.
# Seems pretty fragile for future maintainers. Maybe look into this.
# (Registration order matters because statics.timestamp subclasses float, and
# GraphSONWriter's superclass fallback picks the first matching registry entry.)
class TimestampIO(_GraphSONTypeIO):
    """A timestamp in Python is type float"""
    python_type = statics.timestamp
    graphson_type = "g:Timestamp"
    graphson_base_type = "Timestamp"

    @classmethod
    def dictify(cls, obj, writer):
        # Java timestamp expects milliseconds integer
        # Have to use int because of legacy Python
        ts = int(round(obj * 1000))
        return GraphSONUtil.typedValue(cls.graphson_base_type, ts)

    @classmethod
    def objectify(cls, ts, reader):
        # Python timestamp expects seconds
        return cls.python_type(ts / 1000.0)
class _NumberIO(_GraphSONTypeIO):
    """Base for numeric IO; subclasses define python_type/graphson_base_type."""

    @classmethod
    def dictify(cls, n, writer):
        if isinstance(n, bool):  # because isinstance(False, int) and isinstance(True, int)
            return n
        return GraphSONUtil.typedValue(cls.graphson_base_type, n)

    @classmethod
    def objectify(cls, v, _):
        # Coerce the wire value through the subclass's python type.
        return cls.python_type(v)
class ListIO(_GraphSONTypeIO):
    python_type = ListType
    graphson_type = "g:List"

    @classmethod
    def dictify(cls, l, writer):
        """Serialize a python list as a typed g:List of dictified items."""
        return GraphSONUtil.typedValue("List", [writer.toDict(item) for item in l])

    @classmethod
    def objectify(cls, l, reader):
        """Deserialize a g:List payload into a python list."""
        return [reader.toObject(item) for item in l]
class SetIO(_GraphSONTypeIO):
    python_type = SetType
    graphson_type = "g:Set"

    @classmethod
    def dictify(cls, s, writer):
        """Serialize a python set as a typed g:Set of dictified items."""
        return GraphSONUtil.typedValue("Set", [writer.toDict(item) for item in s])

    @classmethod
    def objectify(cls, s, reader):
        """
        By default, returns a python set
        In case Java returns numeric values of different types which
        python don't recognize, coerce and return a list.
        See comments of TINKERPOP-1844 for more details
        """
        items = [reader.toObject(item) for item in s]
        distinct = set(items)
        if len(distinct) == len(items):
            return distinct
        log.warning("Coercing g:Set to list due to java numeric values. "
                    "See TINKERPOP-1844 for more details.")
        return items
class MapType(_GraphSONTypeIO):
    python_type = DictType
    graphson_type = "g:Map"

    @classmethod
    def dictify(cls, d, writer):
        """Serialize a dict as a flat [k1, v1, k2, v2, ...] g:Map payload."""
        flat = []
        for key, value in d.items():
            flat.append(writer.toDict(key))
            flat.append(writer.toDict(value))
        return GraphSONUtil.typedValue("Map", flat)

    @classmethod
    def objectify(cls, l, reader):
        """Deserialize a flat [k1, v1, ...] g:Map payload into a dict."""
        return dict(
            (reader.toObject(l[i]), reader.toObject(l[i + 1]))
            for i in range(0, len(l), 2)
        )
class BulkSetIO(_GraphSONTypeIO):
    """Deserializer for g:BulkSet, wire-encoded as [obj1, count1, obj2, count2, ...]."""
    graphson_type = "g:BulkSet"

    @classmethod
    def objectify(cls, l, reader):
        # this approach basically mimics what currently existed in 3.3.4 and
        # prior versions where BulkSet is basically just coerced to list. the
        # limitation here is that if the value of a bulk exceeds the size of
        # a list (into the long space) then stuff won't work nice.
        expanded = []
        for i in range(0, len(l), 2):
            obj = reader.toObject(l[i])
            bulk = reader.toObject(l[i + 1])
            expanded.extend([obj] * bulk)
        return expanded
class FloatIO(_NumberIO):
    """(De)serializer for g:Float with special handling for NaN/Infinity."""
    python_type = FloatType
    graphson_type = "g:Float"
    graphson_base_type = "Float"

    @classmethod
    def dictify(cls, n, writer):
        # bool is a subclass of int, so pass booleans through untyped first.
        if isinstance(n, bool):
            return n
        if math.isnan(n):
            return GraphSONUtil.typedValue(cls.graphson_base_type, "NaN")
        if math.isinf(n):
            # Positive and negative infinity get distinct string encodings.
            symbol = "Infinity" if n > 0 else "-Infinity"
            return GraphSONUtil.typedValue(cls.graphson_base_type, symbol)
        return GraphSONUtil.typedValue(cls.graphson_base_type, n)

    @classmethod
    def objectify(cls, v, _):
        if isinstance(v, str):
            # Undo the special string encodings produced by dictify.
            if v == 'NaN':
                return float('nan')
            if v == "Infinity":
                return float('inf')
            if v == "-Infinity":
                return float('-inf')
        return cls.python_type(v)
class BigDecimalIO(_NumberIO):
    """(De)serializer for gx:BigDecimal, carried over the wire as a string."""
    python_type = Decimal
    graphson_type = "gx:BigDecimal"
    graphson_base_type = "BigDecimal"

    @classmethod
    def dictify(cls, n, writer):
        # bool is a subclass of int, so pass booleans through untyped first.
        if isinstance(n, bool):
            return n
        if math.isnan(n):
            return GraphSONUtil.typedValue(cls.graphson_base_type, "NaN", "gx")
        if math.isinf(n):
            symbol = "Infinity" if n > 0 else "-Infinity"
            return GraphSONUtil.typedValue(cls.graphson_base_type, symbol, "gx")
        # Stringify so no precision is lost in transit.
        return GraphSONUtil.typedValue(cls.graphson_base_type, str(n), "gx")

    @classmethod
    def objectify(cls, v, _):
        if isinstance(v, str):
            # Undo the special string encodings produced by dictify.
            if v == 'NaN':
                return Decimal('nan')
            if v == "Infinity":
                return Decimal('inf')
            if v == "-Infinity":
                return Decimal('-inf')
        return Decimal(v)
class DoubleIO(FloatIO):
    """(De)serializer for g:Double; inherits FloatIO's NaN/Infinity handling."""
    graphson_type = "g:Double"
    graphson_base_type = "Double"
class Int64IO(_NumberIO):
    """(De)serializer for g:Int64 (Java long)."""
    python_type = LongType
    graphson_type = "g:Int64"
    graphson_base_type = "Int64"

    @classmethod
    def dictify(cls, n, writer):
        # bool is a subclass of int, so pass booleans through untyped first.
        if isinstance(n, bool):
            return n
        # Values outside Java's signed 64-bit range must travel as BigInteger.
        if not -9223372036854775808 <= n <= 9223372036854775807:
            return GraphSONUtil.typedValue("BigInteger", str(n), "gx")
        return GraphSONUtil.typedValue(cls.graphson_base_type, n)
class BigIntegerIO(Int64IO):
    """Serializer for gx:BigInteger; reuses Int64IO's range-aware dictify."""
    graphson_type = "gx:BigInteger"
class Int32IO(Int64IO):
    """(De)serializer for g:Int32 (Java int)."""
    python_type = IntType
    graphson_type = "g:Int32"
    graphson_base_type = "Int32"

    @classmethod
    def dictify(cls, n, writer):
        # bool is a subclass of int, so pass booleans through untyped first.
        if isinstance(n, bool):
            return n
        # Widen progressively: beyond Java long range -> BigInteger,
        # beyond Java int range -> Int64, otherwise plain Int32.
        if not -9223372036854775808 <= n <= 9223372036854775807:
            return GraphSONUtil.typedValue("BigInteger", str(n), "gx")
        if not -2147483648 <= n <= 2147483647:
            return GraphSONUtil.typedValue("Int64", n)
        return GraphSONUtil.typedValue(cls.graphson_base_type, n)
class ByteIO(_NumberIO):
    """(De)serializer for gx:Byte (a single signed byte)."""
    python_type = SingleByte
    graphson_type = "gx:Byte"
    graphson_base_type = "Byte"

    @classmethod
    def dictify(cls, n, writer):
        # bool is a subclass of int, so pass booleans through untyped first.
        if isinstance(n, bool):
            return n
        return GraphSONUtil.typedValue(cls.graphson_base_type, n, "gx")

    @classmethod
    def objectify(cls, v, _):
        # int.__new__ builds the SingleByte directly, bypassing SingleByte's
        # own constructor -- presumably to skip its validation; confirm.
        return int.__new__(SingleByte, v)
class ByteBufferIO(_GraphSONTypeIO):
    """(De)serializer for gx:ByteBuffer."""
    python_type = ByteBufferType
    graphson_type = "gx:ByteBuffer"
    graphson_base_type = "ByteBuffer"

    @classmethod
    def dictify(cls, n, writer):
        # Encode the byte values as a string of their code points.
        encoded = "".join(map(chr, n))
        return GraphSONUtil.typedValue(cls.graphson_base_type, encoded, "gx")

    @classmethod
    def objectify(cls, v, _):
        return cls.python_type(v, "utf8")
class CharIO(_GraphSONTypeIO):
    """(De)serializer for gx:Char (a single character)."""
    python_type = SingleChar
    graphson_type = "gx:Char"
    graphson_base_type = "Char"

    @classmethod
    def dictify(cls, n, writer):
        return GraphSONUtil.typedValue(cls.graphson_base_type, n, "gx")

    @classmethod
    def objectify(cls, v, _):
        # str.__new__ builds the SingleChar directly, bypassing SingleChar's
        # own constructor -- presumably to skip its validation; confirm.
        return str.__new__(SingleChar, v)
class DurationIO(_GraphSONTypeIO):
    """(De)serializer for gx:Duration, using ISO-8601 duration strings."""
    python_type = timedelta
    graphson_type = "gx:Duration"
    graphson_base_type = "Duration"

    @classmethod
    def dictify(cls, n, writer):
        iso = duration_isoformat(n)
        return GraphSONUtil.typedValue(cls.graphson_base_type, iso, "gx")

    @classmethod
    def objectify(cls, v, _):
        return parse_duration(v)
class VertexDeserializer(_GraphSONTypeIO):
    """Deserializer for g:Vertex."""
    graphson_type = "g:Vertex"

    @classmethod
    def objectify(cls, d, reader):
        vertex_id = reader.toObject(d["id"])
        label = d.get("label", "vertex")
        return Vertex(vertex_id, label)
class EdgeDeserializer(_GraphSONTypeIO):
    """Deserializer for g:Edge."""
    graphson_type = "g:Edge"

    @classmethod
    def objectify(cls, d, reader):
        edge_id = reader.toObject(d["id"])
        out_vertex = Vertex(reader.toObject(d["outV"]),
                            d.get("outVLabel", "vertex"))
        in_vertex = Vertex(reader.toObject(d["inV"]),
                           d.get("inVLabel", "vertex"))
        return Edge(edge_id, out_vertex, d.get("label", "edge"), in_vertex)
class VertexPropertyDeserializer(_GraphSONTypeIO):
    """Deserializer for g:VertexProperty."""
    graphson_type = "g:VertexProperty"

    @classmethod
    def objectify(cls, d, reader):
        # The owning vertex is optional in the payload.
        if "vertex" in d:
            vertex = Vertex(reader.toObject(d.get("vertex")))
        else:
            vertex = None
        return VertexProperty(reader.toObject(d["id"]),
                              d["label"],
                              reader.toObject(d["value"]),
                              vertex)
class PropertyDeserializer(_GraphSONTypeIO):
    """Deserializer for g:Property."""
    graphson_type = "g:Property"

    @classmethod
    def objectify(cls, d, reader):
        # The owning element is optional in the payload.
        element = None
        if "element" in d:
            element = reader.toObject(d["element"])
        return Property(d["key"], reader.toObject(d["value"]), element)
class PathDeserializer(_GraphSONTypeIO):
    """Deserializer for g:Path."""
    graphson_type = "g:Path"

    @classmethod
    def objectify(cls, d, reader):
        labels = reader.toObject(d["labels"])
        objects = reader.toObject(d["objects"])
        return Path(labels, objects)
class TDeserializer(_GraphSONTypeIO):
    """Deserializer for g:T; maps the wire name onto the T enum."""
    graphson_type = "g:T"
    @classmethod
    def objectify(cls, d, reader):
        return T[d]
class DirectionIO(_GraphSONTypeIO):
    """(De)serializer for g:Direction enum values."""
    graphson_type = "g:Direction"
    graphson_base_type = "Direction"
    python_type = Direction

    @classmethod
    def dictify(cls, d, writer):
        # Serialize by enum member name (e.g. "OUT").
        return GraphSONUtil.typedValue(cls.graphson_base_type, d.name, "g")

    @classmethod
    def objectify(cls, d, reader):
        # Look the member back up by its name.
        return Direction[d]
class TraversalMetricsDeserializer(_GraphSONTypeIO):
    """Deserializer for g:TraversalMetrics; delegates to the generic reader."""
    graphson_type = "g:TraversalMetrics"
    @classmethod
    def objectify(cls, d, reader):
        return reader.toObject(d)
class MetricsDeserializer(_GraphSONTypeIO):
    """Deserializer for g:Metrics; delegates to the generic reader."""
    graphson_type = "g:Metrics"
    @classmethod
    def objectify(cls, d, reader):
        return reader.toObject(d)
| apache-2.0 |
vicky2135/lucious | oscar/lib/python2.7/site-packages/pygments/formatters/latex.py | 31 | 17758 | # -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
    """Escape *text* so it can appear inside a LaTeX Verbatim environment.

    Each LaTeX-special character is replaced by the corresponding
    ``\\<commandprefix>Zxx{}`` escape macro (the macros themselves are
    defined by STYLE_TEMPLATE).
    """
    # str.translate performs a single pass over the input, so the braces
    # introduced by the replacement macros are never re-escaped (the
    # original achieved the same via \x00/\x01/\x02 sentinel characters).
    replacements = {
        '\\': 'Zbs', '{': 'Zob', '}': 'Zcb', '^': 'Zca', '_': 'Zus',
        '&': 'Zam', '<': 'Zlt', '>': 'Zgt', '#': 'Zsh', '%': 'Zpc',
        '$': 'Zdl', '-': 'Zhy', "'": 'Zsq', '"': 'Zdq', '~': 'Zti',
    }
    table = {ord(char): '\\%s%s{}' % (commandprefix, macro)
             for char, macro in replacements.items()}
    return text.translate(table)
# Skeleton for a complete standalone LaTeX document (used when the ``full``
# option is set); filled in via %-interpolation in format_unencoded().
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
# Macro definitions emitted by get_style_defs(); see the long comment above
# for how the \PY@... machinery resolves style classes. %(cp)s is the
# command prefix, %(styles)s the per-class \PY@tok@... definitions.
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
    """Return the short class name for *ttype*.

    Falls back to the nearest ancestor with a standard short form,
    appending the names of the intervening subtypes (mirroring what the
    HTML formatter does with CSS classes).
    """
    suffix = ''
    name = STANDARD_TYPES.get(ttype)
    while name is None:
        # Climb towards Token, accumulating the unknown subtype names.
        suffix = ttype[-1] + suffix
        ttype = ttype.parent
        name = STANDARD_TYPES.get(ttype)
    return name + suffix
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\\{\}]
        \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
        \PY{k}{pass}
        \end{Verbatim}

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).

        .. versionadded:: 0.7
        .. versionchanged:: 0.10
           The default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``).

        .. versionadded:: 1.2

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``).

        .. versionadded:: 1.2

    `escapeinside`
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if `texcomments` or `mathescape` is
        set. (default: ``''``).

        .. versionadded:: 2.0

    `envname`
        Allows you to pick an alternative environment name replacing Verbatim.
        The alternate environment still has to support Verbatim's option syntax.
        (default: ``'Verbatim'``).

        .. versionadded:: 2.0
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        # abs() guards against nonsensical negative line-number options.
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        self.escapeinside = options.get('escapeinside', '')
        # escapeinside only takes effect when it is exactly two characters:
        # a left and a right delimiter.
        if len(self.escapeinside) == 2:
            self.left = self.escapeinside[0]
            self.right = self.escapeinside[1]
        else:
            self.escapeinside = ''
        self.envname = options.get('envname', u'Verbatim')
        self._create_stylesheet()

    def _create_stylesheet(self):
        """Build ttype->command-name and command-name->definition maps."""
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # Convert an 'rrggbb' hex string to the '0.00,0.50,1.00' form
            # that LaTeX's color package expects; white when unset.
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
                           rgbcolor(ndef['color']))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
                           (rgbcolor(ndef['border']),
                            rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\colorbox[rgb]{%s}{\strut ##1}}' %
                           rgbcolor(ndef['bgcolor']))
            if cmndef == '':
                continue
            # '$$' is a placeholder for the command prefix in the raw
            # strings above.
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in iteritems(self.cmd2def):
            styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
                          (cp, name, definition))
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        """Write the token stream to *outfile* as a LaTeX Verbatim block."""
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            # Buffer the Verbatim block so it can later be embedded in
            # the full DOC_TEMPLATE document.
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(u'\\begin{' + self.envname + u'}[commandchars=\\\\\\{\\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(u',numbers=left' +
                          (start and u',firstnumber=%d' % start or u'') +
                          (step and u',stepnumber=%d' % step or u''))
        if self.mathescape or self.texcomments or self.escapeinside:
            # Restore catcodes for $, ^ and _ inside the verbatim so that
            # escaped/math segments typeset normally.
            outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
        if self.verboptions:
            outfile.write(u',' + self.verboptions)
        outfile.write(u']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]
                    value = value[len(start):]
                    start = escape_tex(start, cp)
                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    # Leave text between the escape delimiters unescaped.
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                # Unterminated escape: escape everything.
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                value = escape_tex(value, cp)
            # Collect the style classes from most to least specific;
            # reversed below so resolution falls back to parents.
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Emit one \PY{...}{...} per line so fancyvrb keeps its
                # line handling; bare newlines pass through unwrapped.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'\\end{' + self.envname + u'}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existings encodings from LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                              dict(docclass=self.docclass,
                                   preamble=self.preamble,
                                   title=self.title,
                                   encoding=encoding,
                                   styledefs=self.get_style_defs(),
                                   code=outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
    """
    This lexer takes one lexer as argument, the lexer for the language
    being formatted, and the left and right delimiters for escaped text.

    First everything is scanned using the language lexer to obtain
    strings and comments. All other consecutive tokens are merged and
    the resulting text is scanned for escaped segments, which are given
    the Token.Escape type. Finally text that is not escaped is scanned
    again with the language lexer.
    """
    def __init__(self, left, right, lang, **options):
        self.left = left    # left escape delimiter
        self.right = right  # right escape delimiter
        self.lang = lang    # the wrapped language lexer
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Strings and comments pass straight through; all other
        # consecutive tokens are buffered so that an escape spanning
        # several of them is still found as one segment.
        buf = ''
        idx = 0
        for i, t, v in self.lang.get_tokens_unprocessed(text):
            if t in Token.Comment or t in Token.String:
                if buf:
                    for x in self.get_tokens_aux(idx, buf):
                        yield x
                    buf = ''
                yield i, t, v
            else:
                if not buf:
                    idx = i  # remember where this merged run started
                buf += v
        if buf:
            for x in self.get_tokens_aux(idx, buf):
                yield x

    def get_tokens_aux(self, index, text):
        # Split *text* on the escape delimiters: unescaped parts are
        # re-lexed with the language lexer, escaped parts are emitted
        # as Token.Escape with correct absolute positions.
        while text:
            a, sep1, text = text.partition(self.left)
            if a:
                for i, t, v in self.lang.get_tokens_unprocessed(a):
                    yield index + i, t, v
                index += len(a)
            if sep1:
                b, sep2, text = text.partition(self.right)
                if sep2:
                    yield index + len(sep1), Token.Escape, b
                    index += len(sep1) + len(b) + len(sep2)
                else:
                    # Unterminated escape: flag the lone delimiter as an
                    # error and continue lexing the remainder normally.
                    yield index, Token.Error, sep1
                    index += len(sep1)
                    text = b
| bsd-3-clause |
Therp/odoo | addons/website_mail/models/mail_message.py | 264 | 4705 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
from openerp.tools.translate import _
from openerp.osv import osv, fields, expression
class MailMessage(osv.Model):
    """Extension of mail.message adding website publication support."""
    _inherit = 'mail.message'

    def _get_description_short(self, cr, uid, ids, name, arg, context=None):
        """Function field: short description for each message.

        Uses the subject when present, otherwise the first 30 characters
        of the plain-text body (with ' [...]' appended when truncated).
        """
        res = dict.fromkeys(ids, False)
        for message in self.browse(cr, uid, ids, context=context):
            if message.subject:
                res[message.id] = message.subject
            else:
                plaintext_ct = '' if not message.body else html2plaintext(message.body)
                res[message.id] = plaintext_ct[:30] + '%s' % (' [...]' if len(plaintext_ct) >= 30 else '')
        return res

    _columns = {
        'description': fields.function(
            _get_description_short, type='char',
            help='Message description: either the subject, or the beginning of the body'
        ),
        'website_published': fields.boolean(
            'Published', help="Visible on the website as a comment", copy=False,
        ),
    }

    def default_get(self, cr, uid, fields_list, context=None):
        """Default ``website_published`` to True for new messages."""
        defaults = super(MailMessage, self).default_get(cr, uid, fields_list, context=context)
        # Note: explicitly implemented in default_get() instead of _defaults,
        # to avoid setting to True for all existing messages during upgrades.
        # TODO: this default should probably be dynamic according to the model
        # on which the messages are attached, thus moved to create().
        if 'website_published' in fields_list:
            defaults.setdefault('website_published', True)
        return defaults

    def _search(self, cr, uid, args, offset=0, limit=None, order=None,
                context=None, count=False, access_rights_uid=None):
        """ Override that adds specific access rights of mail.message, to restrict
            messages to published messages for public users. """
        if uid != SUPERUSER_ID:
            group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
            group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
            if group_user_id in [group.id for group in group_ids]:
                # Public users may only ever see published messages.
                args = expression.AND([[('website_published', '=', True)], list(args)])
        return super(MailMessage, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
                                                context=context, count=count, access_rights_uid=access_rights_uid)

    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """ Add Access rules of mail.message for non-employee user:
            - read:
                - raise if the type is comment and subtype NULL (internal note)
        """
        if uid != SUPERUSER_ID:
            group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
            group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
            if group_user_id in [group.id for group in group_ids]:
                # Deny any access by public users to unpublished messages.
                # (Table name comes from the ORM's _table, not user input.)
                cr.execute('SELECT id FROM "%s" WHERE website_published IS FALSE AND id = ANY (%%s)' % (self._table), (ids,))
                if cr.fetchall():
                    raise osv.except_osv(
                        _('Access Denied'),
                        _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % (self._description, operation))
        return super(MailMessage, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
| agpl-3.0 |
aocks/ox_herd | ox_herd/core/plugins/pytest_plugin/core.py | 1 | 15835 | """Module containing some plugin to run pytest.
"""
import configparser
import logging
import os
import tempfile
import json
import subprocess
import shlex
import urllib
import urllib.parse
import urllib.request
import hmac
import jinja2
import xmltodict
import yaml
from ox_herd import settings as ox_herd_settings
from ox_herd.core.plugins import base
from ox_herd.core.ox_tasks import OxHerdTask
from ox_herd.core.plugins.pytest_plugin import forms
from ox_herd.core.plugins import post_to_github_plugin
class OxHerdPyTestPlugin(base.OxPlugin):
    """Plugin to provide pytest services for ox_herd."""

    # Shared flask blueprint for the plugin; set once via set_bp() at
    # registration time (name-mangled to keep it private to this class).
    __blueprint = None

    @classmethod
    def set_bp(cls, my_bp):
        """Record the flask blueprint this plugin should expose."""
        cls.__blueprint = my_bp

    @classmethod
    def get_bp(cls):
        """Return the flask blueprint recorded via set_bp (or None)."""
        return cls.__blueprint

    def get_flask_blueprint(self):
        """Implement as required by OxPlugin."""
        return self.__class__.get_bp()

    def name(self):
        """Implement as required by OxPlugin."""
        return 'pytest_plugin'

    def description(self):
        """Implement as required by OxPlugin."""
        return "Plugin to provide pytest services for ox_herd."

    def get_components(self):
        """Return the single RunPyTest component this plugin provides."""
        return [RunPyTest('plugin component')]
class RunPyTest(OxHerdTask, base.OxPluginComponent):
def __init__(self, *args, url=None, pytest_cmd=None, xml_file=None,
             github_info=None, **kw):
    """Initializer.

    :arg *args: Arguments passed through to OxHerdTask.__init__.

    :arg url: URL representing where to run pytest on.

    :arg pytest_cmd: String with command line arguments for running pytest.

    :arg xml_file=None: Optional path for where to store xml_file
                        with test results. Usually better to leave this
                        as None indicating to just use a temp file.
                        Sometimes can be useful for testing.

    :arg github_info=None: Optional json object containing info about
                           github repo and issue to post comment to.

    :arg **kw: Keyword arguments passed through to OxHerdTask.__init__.
    """
    OxHerdTask.__init__(self, *args, **kw)
    self.url = url
    self.pytest_cmd = pytest_cmd
    self.xml_file = xml_file
    self.github_info = github_info
@classmethod
def make_task_from_request(cls, request, pull_url_type='html'):
    """Make an instance of this task from a web request.

    :arg request: Web request in json format (e.g., from GitHub webhook).

    :arg pull_url_type='html': Which repository URL field from the
                               payload to test ('html' -> 'html_url').

    :returns: Instance of cls designed to execute a test based
              on information from webhook.

    PURPOSE: Create instance of cls based on a hit to the ox_herd/pytest
             endpoint (which must be of type application/json). This
             does things like validate the HMAC from github, pull out
             the payload from the request, and configure the task.
             After that you can call launch_raw_task on the returned
             value if you have an OxScheduler object.
    """
    payload = json.loads(request.data.decode('utf8'))
    my_pr = payload['pull_request']
    my_conf, dummy_my_sec = cls._get_config_info(my_pr)
    # Reject requests whose HMAC signature does not match our secret.
    cls._validate_request(request, my_conf['github_secret'])
    sha = my_pr['head']['sha']
    name = 'github_pr_pytest_%s_%s' % (sha[:10], my_pr['updated_at'])
    # Use cls rather than hard-coding RunPyTest so that subclasses get
    # instances of themselves back from this factory classmethod.
    task = cls(
        name=name, url=payload['repository']['%s_url' % pull_url_type],
        pytest_cmd='--doctest-modules', timeout=3000,
        github_info=my_pr)
    return task
@staticmethod
def cmd_name():
    """Return the command name ('pytest') as required by OxPluginComponent."""
    return 'pytest'
@staticmethod
def get_template_name():
    """Return the custom template used to report py test results."""
    return 'py_test_report.html'
@classmethod
def main_call(cls, ox_herd_task):
    """Run the pytest task described by *ox_herd_task*.

    :arg ox_herd_task: RunPyTest instance with url/pytest_cmd/xml_file.

    :returns: Dict with 'return_value' (summary string) and 'json_blob'
              (the full report data from make_report).
    """
    # Write the junit xml report to the user-supplied path, or a temp file.
    test_file = ox_herd_task.xml_file if ox_herd_task.xml_file else (
        tempfile.mktemp(suffix='.xml'))
    # Create a temporary directory with a context manager so that we
    # can safely use it inside the call and be confident it will get
    # cleaned up properly.
    with tempfile.TemporaryDirectory(suffix='.ox_pytest') as my_tmp_dir:
        url, cmd_line = cls.do_test(ox_herd_task, test_file, my_tmp_dir)
        test_data = cls.make_report(ox_herd_task, test_file, url, cmd_line)
        if not ox_herd_task.xml_file:
            logging.debug('Removing temporary xml report %s', test_file)
            os.remove(test_file)  # remove temp file
        cls.post_results_to_github(ox_herd_task, test_data)
        rval = test_data['summary']
        return {'return_value' : rval, 'json_blob' : test_data}
@classmethod
def do_test(cls, py_test_args, test_file, my_tmp_dir):
    """Invoke py.test for the given task.

    :arg py_test_args: RunPyTest task (provides url, pytest_cmd, github_info).
    :arg test_file: Path where the junit xml report will be written.
    :arg my_tmp_dir: Temporary directory used for any git clone.

    :returns: Pair (parsed url, command line list handed to py.test).
    """
    # Will force PYTHONPATH into my_env to ensure we test the
    # right thing
    my_env = os.environ.copy()
    pta = py_test_args.pytest_cmd
    clone_path = None
    if isinstance(pta, str):
        pta = shlex.split(pta)
    # --boxed runs each test in a subprocess (pytest-xdist/forked).
    pta.append('--boxed')
    url = urllib.parse.urlparse(py_test_args.url)
    if url.scheme == 'file':
        # Local checkout: test it in place.
        cmd_line = [url.path, '--junitxml', test_file, '-v'] + pta
        my_env['PYTHONPATH'] = url.path
    elif url.scheme == '' and url.path[:15] == 'git@github.com:':
        # SSH-style github path: clone before testing.
        clone_path = url.path
    elif url.scheme == 'https':
        my_conf, dummy_sec = cls._get_config_info(py_test_args.github_info)
        if 'github_token' in my_conf:
            # Embed the token into the URL so private repos can be cloned.
            clone_path = 'https://%s@%s%s' % (my_conf[
                'github_token'], url.netloc, url.path)
        else:
            clone_path = url.geturl()
    else:
        raise ValueError('URL scheme/path = "%s/%s" not handled yet.' % (
            url.scheme, url.path))
    if clone_path:
        cls.prep_git_clone(py_test_args, clone_path, my_tmp_dir, my_env)
        cmd_line = [my_tmp_dir, '--junitxml', test_file, '-v'] + pta
    logging.info('Running pytest on %s with command arguments of: %s',
                 my_tmp_dir, str(cmd_line))
    subprocess.call(['py.test'] + cmd_line, env=my_env)
    return url, cmd_line
@classmethod
def prep_git_clone(cls, py_test_args, clone_path, my_tmp_dir, my_env):
    """Clone the repo to test into *my_tmp_dir* and configure PYTHONPATH.

    :arg py_test_args: RunPyTest task (github_info may hold PR data).
    :arg clone_path: Git URL/path to clone from.
    :arg my_tmp_dir: Temporary directory to clone into.
    :arg my_env: Environment dict for the pytest subprocess; mutated to
                 set PYTHONPATH (and possibly PYTEST_ADDOPTS).
    """
    # If you are using github, then we need gitpython so import it
    # here so non-github users do not need it
    from git import Repo
    if py_test_args.github_info:
        # Test the exact commit the pull request points at.
        sha = py_test_args.github_info['head']['sha']
        repo_name = py_test_args.github_info['head']['repo']['name']
    else:
        sha, repo_name = None, os.path.split(
            clone_path)[-1].split('.git')[0]
    logging.warning('Preparing to clone %s', clone_path)
    my_repo = Repo.clone_from(clone_path, my_tmp_dir + '/' + repo_name)
    logging.info('Finished cloning %s', clone_path)
    if sha is not None:
        my_repo.git.checkout(py_test_args.github_info['head']['sha'])
    new_repo = os.path.join(my_tmp_dir, repo_name)
    my_env['PYTHONPATH'] = '%s:%s' % (new_repo, my_tmp_dir)
    # Optional per-repo configuration committed as ox_herd_test.yaml.
    yaml_file = os.path.join(new_repo, 'ox_herd_test.yaml')
    if os.path.exists(yaml_file):
        yconfig = yaml.safe_load(open(yaml_file).read())
        # Extra paths to search before the repo's own path.
        my_env['PYTHONPATH'] = ':'.join([name for name in yconfig.pop(
            'prepend_pypaths', [])] + [my_env['PYTHONPATH']])
        # Additional repos the tests depend on.
        for gname, gpath in yconfig.pop('git_clones', {}).items():
            Repo.clone_from(gpath, os.path.join(my_tmp_dir, gname))
        if 'PYTEST_ADDOPTS' in yconfig:
            my_env['PYTEST_ADDOPTS'] = yconfig.pop('PYTEST_ADDOPTS')
        if yconfig:
            # Anything left over is unknown config; warn but continue.
            logging.warning('Unprocessed items in yaml file: %s', str(
                yconfig))
@classmethod
def post_results_to_github(cls, ox_herd_task, test_data):
    """Helper method to post test results to github.

    :arg ox_herd_task: The Ox Herd task containing data.

    :arg test_data: A dictionary containing the result of running
                    tests as produced by make_report.

    PURPOSE: If the user has his .ox_herd_conf file setup to include
             a [pytest/DEFAULT] section (or a section like
             [pytest/owner/repo]) with a github_user and
             github_token with access to the repo in
             ox_herd_task.github_info, then we will try
             to post the results of the test as a comment in
             a github issue.

             This is a key feature in using this plugin for
             continuous integration with github.
    """
    if not ox_herd_task.github_info:
        return  # Not a github-triggered run; nothing to post.
    grepo = ox_herd_task.github_info['head']['repo']['full_name']
    grepo = grepo.strip()
    sha = ox_herd_task.github_info['head']['sha']
    tmsg = 'Testing commit %s' % sha
    my_conf, dummy_sec = cls._get_config_info(ox_herd_task.github_info)
    msg = '%s\n\nTested %s:\n%s\n' % (tmsg, grepo, test_data['summary'])
    # Errors and failures both count as failed tests for reporting.
    failures = int(test_data['testsuite']['@errors']) + int(
        test_data['testsuite']['@failures'])
    if failures:
        # Render the failure details through the plugin's own template.
        msg += '\n\n' + jinja2.Environment(loader=jinja2.FileSystemLoader(
            os.path.dirname(forms.__file__).rstrip(
                '/') + '/templates/')).get_template(
                    'py_test_failures.html').render(
                        test_list=test_data['tests'])
    if 'github_issue' in my_conf:
        # Config can pin all comments onto one fixed issue by title.
        title = my_conf['github_issue']
        number = None
    else:
        title = ox_herd_task.github_info['title']
        number = ox_herd_task.github_info['number']
    full_repo = ox_herd_task.github_info['head']['repo']['full_name']
    cthread = post_to_github_plugin.PostToGitHub.prep_comment_thread(
        title, number, full_repo, my_conf)
    cthread.add_comment(msg, allow_create=True)
@classmethod
def _get_config_info(cls, github_info):
"""Get configuration info from OX_HERD_CONF file based on github_info.
:arg github_info: Dictionary with data about github repo or None
to use section pytest/DEFAULT
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: A dictionary from configparser.ConfigParser pulled out of
OX_HERD_CONF file based on the repo in github.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Figure out which repo we are dealing with and extract
configuration from OX_HERD_CONF.
"""
config_file = cls.get_conf_file()
my_config = configparser.ConfigParser()
my_config.read(config_file)
if github_info:
owner, repo = github_info['head']['repo']['full_name'].split('/')
section = 'pytest/%s/%s' % (owner, repo)
else:
section = None
if section is not None and section in my_config:
my_sec = section
else:
my_sec = 'pytest/DEFAULT'
my_data = my_config[my_sec]
return my_data, my_sec
    @staticmethod
    def get_conf_file():
        "Helper to deduce config file; path comes from ox_herd settings."
        return ox_herd_settings.OX_HERD_CONF
@staticmethod
def _validate_request(request, secret):
"""Validate github signature on request.
:arg request: Web request of type application/json
:arg secret: Secret used for HMAC.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Verify the HMAC or raise ValueError.
"""
header_signature = request.headers.get('X-Hub-Signature')
if header_signature is None:
raise ValueError(
'No header signature provided to validate request')
sha_name, signature = header_signature.split('=')
if sha_name != 'sha1':
raise ValueError('Header signature "%s" not supported.' % sha_name)
request_data = request.data
mac = hmac.new(bytes(secret, 'utf8'), request_data, digestmod='sha1')
if not str(mac.hexdigest()) == str(signature):
raise ValueError('Request digest does not match signature %s' % (
str(signature)))
@staticmethod
def make_report(my_task, test_file, url, cmd_line):
test_data = xmltodict.parse(open(test_file, 'rb').read(),
xml_attribs=True)
test_data['url'] = url
test_data['cmd_line'] = cmd_line
test_data['task_name'] = my_task.name
summary_fields = ['errors', 'failures', 'skips', 'tests', 'time']
fields = test_data['testsuite']
logging.info('Test fields are %s', ', '.join(map(str, fields)))
test_data['summary'] = 'Test resultls: ' + ', '.join([
'%s: %s' % (name, test_data['testsuite'].get(
'@' + name, 'None'))
for name in summary_fields])
test_data['tests'] = test_data['testsuite']['testcase']
return test_data
def get_ox_task_cls(self):
return self.__class__
    def get_flask_form_via_cls(cls):
        """Return the flask form class used to schedule this kind of job.

        NOTE(review): despite the name and the ``cls`` parameter this is a
        plain instance method (no @classmethod decorator), so ``cls`` is
        really ``self`` -- confirm whether a decorator was intended.
        """
        result = forms.SchedJobForm
        logging.debug('Providing form %s for cls %s', result, cls)
        return result
    @classmethod
    def make_push_warn_task(cls, request, warnables=('refs/heads/master',)):
        """Helper to make a task to warn about direct pushes to master.

        :arg request:    The web request from a github webhook.

        :arg warnables=('refs/heads/master',):  Tuple of refs to warn about.

        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

        :returns:  An instance of the PostToGitHub class that when run
                   will post a message to github warning about pushing
                   directly to master, or None when no warning is needed.

        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

        PURPOSE:  Support continuous integration via pull requests by
                  creating a task that will warn about direct pushes to
                  master. This is intended to be called by the pytest
                  route if a push event is seen.
        """
        payload = json.loads(request.data.decode('utf8'))
        cname = payload.get('head_commit', {}).get('committer', {}).get(
            'name', {})
        if cname == 'GitHub': # This was a pull request merge so return None
            return None
        # Fabricate a minimal github_info shape so _get_config_info can
        # resolve the repo-specific config section.
        gh_info = {'head': {'repo': payload['repository']},
                   'title': 'push_warning', 'number': None}
        my_conf, my_sec = cls._get_config_info(gh_info)
        if payload['ref'] not in warnables:
            logging.debug('Pushing to %s not %s so not warning on push',
                          payload['ref'], str(warnables))
            return None
        full_repo = payload['repository']['full_name']
        title = 'warning_push'
        # NOTE(review): the HMAC check happens only after the payload is
        # parsed and config looked up; the secret comes from repo-specific
        # config, so this ordering is forced -- confirm it is acceptable.
        cls._validate_request(request, my_conf['github_secret'])
        msg = 'Warning: %s pushed to %s on %s' % (
            payload['sender']['login'], payload['ref'], full_repo)
        task = post_to_github_plugin.PostToGitHub(
            msg, full_repo, title, None, cls.get_conf_file(), my_sec,
            name='github_posting')
        return task
| bsd-2-clause |
rnikiforova/GuruTubeProject | GuruTube/libraries/django/contrib/auth/urls.py | 104 | 1443 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import patterns, url
# NOTE(review): ``patterns()`` with dotted-string view names is the
# pre-Django-1.8 URLconf style this file intentionally uses (see the
# base36 compatibility note below, slated for removal in Django 1.7).
urlpatterns = patterns('',
    url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
    url(r'^password_change/$', 'django.contrib.auth.views.password_change', name='password_change'),
    url(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done', name='password_change_done'),
    url(r'^password_reset/$', 'django.contrib.auth.views.password_reset', name='password_reset'),
    url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'),
    # Support old style base36 password reset links; remove in Django 1.7
    url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        'django.contrib.auth.views.password_reset_confirm_uidb36'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        'django.contrib.auth.views.password_reset_confirm',
        name='password_reset_confirm'),
    url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete', name='password_reset_complete'),
)
| bsd-3-clause |
GeyerA/android_external_chromium_org | chrome/common/extensions/docs/server2/patched_file_system_test.py | 31 | 6616 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
import unittest
from file_system import FileSystem, FileNotFoundError, StatInfo
from future import Future
from patched_file_system import PatchedFileSystem
from patcher import Patcher
from test_file_system import TestFileSystem
from test_patcher import TestPatcher
import url_constants
# Host (pre-patch) file system contents used by all tests below.
_TEST_FS_DATA = {
  'dir1': {
    'file1.html': 'This is dir1/file1.html',
    'unmodified': {
      '1': '1',
      '2': '',
    },
  },
  'dir2': {
    'subdir1': {
      'sub1.txt': 'in subdir(1)',
      'sub2.txt': 'in subdir(2)',
      'sub3.txt': 'in subdir(3)',
    },
  },
  'dir3': {
  },
  'dir4': {
    'one.txt': '',
  },
  'dir5': {
    'subdir': {
      '1.txt': '555',
    },
  },
  'test1.txt': 'test1',
  'test2.txt': 'test2',
}
# Version string the fake patch reports.
_TEST_PATCH_VERSION = '1001'
# Files the patch adds / deletes / modifies, in that order.
_TEST_PATCH_FILES = (
  # Added
  [
    'test3.txt',
    'dir1/file2.html',
    'dir1/newsubdir/a.js',
    'newdir/1.html',
  ],
  # Deleted
  [
    'test2.txt',
    'dir2/subdir1/sub1.txt',
    'dir4/one.txt',
    'dir5/subdir/1.txt',
  ],
  # Modified
  [
    'dir2/subdir1/sub2.txt',
  ]
)
# Contents for every file the patch adds or modifies.
_TEST_PATCH_DATA = {
  'test3.txt': 'test3 is added.',
  'dir1/file2.html': 'This is dir1/file2.html',
  'dir1/newsubdir/a.js': 'This is a.js',
  'newdir/1.html': 'This comes from a new dir.',
  'dir2/subdir1/sub2.txt': 'in subdir',
}
class PatchedFileSystemTest(unittest.TestCase):
  """Exercises PatchedFileSystem against the fixture data + patch above."""
  def setUp(self):
    """Build a patched view of _TEST_FS_DATA using the test patch."""
    self._patcher = TestPatcher(_TEST_PATCH_VERSION,
                                _TEST_PATCH_FILES,
                                _TEST_PATCH_DATA)
    self._host_file_system = TestFileSystem(_TEST_FS_DATA)
    self._file_system = PatchedFileSystem(self._host_file_system,
                                          self._patcher)
  def testRead(self):
    """Added/modified files come from the patch; deleted files raise."""
    expected = deepcopy(_TEST_PATCH_DATA)
    # Files that are not modified.
    expected.update({
      'dir2/subdir1/sub3.txt': 'in subdir(3)',
      'dir1/file1.html': 'This is dir1/file1.html',
    })
    for key in expected:
      self.assertEqual(expected[key], self._file_system.ReadSingle(key))
    self.assertEqual(
        expected,
        self._file_system.Read(expected.keys()).Get())
    self.assertRaises(FileNotFoundError, self._file_system.ReadSingle,
                      'test2.txt')
    self.assertRaises(FileNotFoundError, self._file_system.ReadSingle,
                      'dir2/subdir1/sub1.txt')
    self.assertRaises(FileNotFoundError, self._file_system.ReadSingle,
                      'not_existing')
    self.assertRaises(FileNotFoundError, self._file_system.ReadSingle,
                      'dir1/not_existing')
    self.assertRaises(FileNotFoundError, self._file_system.ReadSingle,
                      'dir1/newsubdir/not_existing')
  def testReadDir(self):
    """Directory listings merge host entries with patch adds/deletes."""
    self.assertEqual(sorted(self._file_system.ReadSingle('dir1/')),
                     sorted(set(self._host_file_system.ReadSingle('dir1/')) |
                            set(('file2.html', 'newsubdir/'))))
    self.assertEqual(sorted(self._file_system.ReadSingle('dir1/newsubdir/')),
                     sorted(['a.js']))
    self.assertEqual(sorted(self._file_system.ReadSingle('dir2/')),
                     sorted(self._host_file_system.ReadSingle('dir2/')))
    self.assertEqual(sorted(self._file_system.ReadSingle('dir2/subdir1/')),
                     sorted(set(self._host_file_system.ReadSingle('dir2/subdir1/')) -
                            set(('sub1.txt',))))
    self.assertEqual(sorted(self._file_system.ReadSingle('newdir/')),
                     sorted(['1.html']))
    self.assertEqual(self._file_system.ReadSingle('dir3/'), [])
    self.assertEqual(self._file_system.ReadSingle('dir4/'), [])
    self.assertRaises(FileNotFoundError, self._file_system.ReadSingle,
                      'not_existing_dir/')
  def testStat(self):
    """Anything the patch touches reports version 'patched_<version>'."""
    version = 'patched_%s' % self._patcher.GetVersion()
    old_version = self._host_file_system.Stat('dir1/file1.html').version
    # Stat an unmodified file.
    self.assertEqual(self._file_system.Stat('dir1/file1.html'),
                     self._host_file_system.Stat('dir1/file1.html'))
    # Stat an unmodified directory.
    self.assertEqual(self._file_system.Stat('dir1/unmodified/'),
                     self._host_file_system.Stat('dir1/unmodified/'))
    # Stat a modified directory.
    self.assertEqual(self._file_system.Stat('dir2/'),
                     StatInfo(version, {'subdir1/': version}))
    self.assertEqual(self._file_system.Stat('dir2/subdir1/'),
                     StatInfo(version, {'sub2.txt': version,
                                        'sub3.txt': old_version}))
    # Stat a modified directory with new files.
    expected = self._host_file_system.Stat('dir1/')
    expected.version = version
    expected.child_versions.update({'file2.html': version,
                                    'newsubdir/': version})
    self.assertEqual(self._file_system.Stat('dir1/'),
                     expected)
    # Stat an added directory.
    self.assertEqual(self._file_system.Stat('dir1/newsubdir/'),
                     StatInfo(version, {'a.js': version}))
    self.assertEqual(self._file_system.Stat('dir1/newsubdir/a.js'),
                     StatInfo(version))
    self.assertEqual(self._file_system.Stat('newdir/'),
                     StatInfo(version, {'1.html': version}))
    self.assertEqual(self._file_system.Stat('newdir/1.html'),
                     StatInfo(version))
    # Stat files removed in the patch.
    self.assertRaises(FileNotFoundError, self._file_system.Stat,
                      'dir2/subdir1/sub1.txt')
    self.assertRaises(FileNotFoundError, self._file_system.Stat,
                      'dir4/one.txt')
    # Stat empty directories.
    self.assertEqual(self._file_system.Stat('dir3/'),
                     StatInfo(old_version, {}))
    self.assertEqual(self._file_system.Stat('dir4/'),
                     StatInfo(version, {}))
    self.assertEqual(self._file_system.Stat('dir5/subdir/'),
                     StatInfo(version, {}))
    # Stat empty (after patch) directory's parent
    self.assertEqual(self._file_system.Stat('dir5/'),
                     StatInfo(version, {'subdir/': version}))
    # Stat files that don't exist either before or after patching.
    self.assertRaises(FileNotFoundError, self._file_system.Stat,
                      'not_existing/')
    self.assertRaises(FileNotFoundError, self._file_system.Stat,
                      'dir1/not_existing/')
    self.assertRaises(FileNotFoundError, self._file_system.Stat,
                      'dir1/not_existing')
if __name__ == '__main__':
  unittest.main()  # Allow running this test module directly.
| bsd-3-clause |
jjneely/ansible | lib/ansible/runner/action_plugins/add_host.py | 10 | 4087 | # Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible.callbacks import vv
from ansible.errors import AnsibleError as ae
from ansible.runner.return_data import ReturnData
from ansible.utils import parse_kv
from ansible.inventory.host import Host
from ansible.inventory.group import Group
class ActionModule(object):
    ''' Create inventory hosts and groups in the memory inventory'''
    ### We need to be able to modify the inventory
    BYPASS_HOST_LOOP = True
    TRANSFERS_FILES = False
    def __init__(self, runner):
        self.runner = runner
    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        # add_host cannot do anything meaningful in check mode.
        if self.runner.noop_on_check(inject):
            return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
        args = {}
        # complex_args (YAML-style args) are merged first so that k=v
        # pairs in module_args win on conflict.
        if complex_args:
            args.update(complex_args)
        args.update(parse_kv(module_args))
        if not 'hostname' in args and not 'name' in args:
            raise ae("'name' is a required argument.")
        result = {}
        # Parse out any hostname:port patterns
        # NOTE(review): a bare IPv6 address would also match this ':'
        # check and be split -- confirm callers never pass one.
        new_name = args.get('name', args.get('hostname', None))
        vv("creating host via 'add_host': hostname=%s" % new_name)
        if ":" in new_name:
            new_name, new_port = new_name.split(":")
            args['ansible_ssh_port'] = new_port
        # redefine inventory and get group "all"
        inventory = self.runner.inventory
        allgroup = inventory.get_group('all')
        # check if host in cache, add if not
        if new_name in inventory._hosts_cache:
            new_host = inventory._hosts_cache[new_name]
        else:
            new_host = Host(new_name)
            # only groups can be added directly to inventory
            inventory._hosts_cache[new_name] = new_host
            allgroup.add_host(new_host)
        # Add any variables to the new_host
        for k in args.keys():
            if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
                new_host.set_variable(k, args[k])
        groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
        # add it to the group if that was specified
        if groupnames:
            for group_name in groupnames.split(","):
                group_name = group_name.strip()
                if not inventory.get_group(group_name):
                    new_group = Group(group_name)
                    inventory.add_group(new_group)
                grp = inventory.get_group(group_name)
                grp.add_host(new_host)
                # add this host to the group cache
                if inventory._groups_list is not None:
                    if group_name in inventory._groups_list:
                        if new_host.name not in inventory._groups_list[group_name]:
                            inventory._groups_list[group_name].append(new_host.name)
                vv("added host to group via add_host module: %s" % group_name)
            result['new_groups'] = groupnames.split(",")
        result['new_host'] = new_name
        # clear pattern caching completely since it's unpredictable what
        # patterns may have referenced the group
        inventory.clear_pattern_cache()
        return ReturnData(conn=conn, comm_ok=True, result=result)
| gpl-3.0 |
pliz/gunfolds | scripts/rate_agnostic_runner.py | 1 | 7057 | import sys, os
sys.path.append('./tools/')
import traversal, bfutils, graphkit
import unknownrate as ur
from multiprocessing import Pool,Process, Queue, cpu_count, current_process, active_children
import functools
import zickle as zkl
import time, socket
import scipy
KEY='rasl_il_u2'  # tag embedded in the output .zkl file names
UMAX = 2  # maximum undersampling rate to explore
INPNUM = 1 # number of randomized starts per graph
CAPSIZE= 1000 # stop traversing after growing equivalence class to this size
REPEATS = 100  # number of random graphs generated per density
# Pick the worker pool size per known machine; unknown hosts get all
# cores minus ~7% headroom.  PNUM is then divided by INPNUM since each
# graph already fans out INPNUM randomized starts.
if socket.gethostname().split('.')[0] == 'leibnitz':
    PNUM=60
    PNUM=max((1,PNUM/INPNUM))
elif socket.gethostname().split('.')[0] == 'mars':
    PNUM=20
    PNUM=max((1,PNUM/INPNUM))
elif socket.gethostname().split('.')[0] == 'hooke':
    PNUM=21
    PNUM=max((1,PNUM/INPNUM))
elif socket.gethostname().split('.')[0] == 'saturn':
    PNUM=12
    PNUM=max((1,PNUM/INPNUM))
else:
    # Setting the number of parallel running processes to the number
    # of cores minus 7% for breathing room
    PNUM=cpu_count()-int(0.07*cpu_count())
    PNUM=max((1,PNUM/INPNUM))
print 'processes: ',PNUM, INPNUM
def multiprocess(argslist, ncpu):
    """Spawn up to ncpu concurrent ra_wrapper_ workers over argslist.

    Each worker pushes its {'gt': graph, 'solutions': ...} dict onto a
    shared Queue; all results are collected and printed at the end.

    NOTE(review): the loop stops after 10 spawned jobs (done<10) no
    matter how long argslist is, and the spawn loop busy-waits between
    starts -- confirm both are intentional.
    """
    total = len(argslist)
    done = 0
    result_queue = Queue()
    jobs = []
    def ra_wrapper_(fold, n=10, k=10):
        # Worker body: same search as ra_wrapper but reports via the
        # queue instead of returning.  Reseed so forks diverge.
        scipy.random.seed()
        l = {}
        while True:
            try:
                g = bfutils.ringmore(n,k) # random ring of given density
                gs= bfutils.call_undersamples(g)
                for u in range(1,min([len(gs),UMAX])):
                    g2 = bfutils.undersample(g,u)
                    print fold,': ',traversal.density(g),':',
                    startTime = int(round(time.time() * 1000))
                    s = ur.liteqclass(g2, verbose=False, capsize=CAPSIZE)
                    endTime = int(round(time.time() * 1000))
                    print len(s), u
                    l[u] = {'eq':s,'ms':endTime-startTime}
            except MemoryError:
                print 'memory error... retrying'
                continue
            break
        result_queue.put( {'gt':g,'solutions':l} )
    while argslist != [] and done<10 :
        if len(active_children()) < ncpu:
            p = Process(target=ra_wrapper_,args=(argslist.pop(),))
            jobs.append(p)
            p.start()
            done+=1
            print "\r",float(done)/total,"%",
    #get results here
    res = [result_queue.get() for p in jobs]
    print res
def ra_wrapper(fold, n=10, k=10):
    """Generate one random ring graph and solve its equivalence classes.

    For each undersampling rate u in [1, min(len(gs), UMAX)) the
    undersampled graph is solved with ur.eqclass and timed.  The whole
    attempt restarts from a fresh random graph on MemoryError.

    Returns {'gt': ground-truth graph, 'solutions': {u: {'eq', 'ms'}}}.
    """
    scipy.random.seed()
    l = {}
    while True:
        try:
            g = bfutils.ringmore(n,k) # random ring of given density
            gs= bfutils.call_undersamples(g)
            for u in range(1,min([len(gs),UMAX])):
                g2 = bfutils.undersample(g,u)
                print fold,': ',traversal.density(g),':', traversal.density(g2),':',
                startTime = int(round(time.time() * 1000))
                #s = ur.liteqclass(g2, verbose=False, capsize=CAPSIZE)
                s = ur.eqclass(g2)
                endTime = int(round(time.time() * 1000))
                print len(s), u
                l[u] = {'eq':s,'ms':endTime-startTime}
        except MemoryError:
            print 'memory error... retrying'
            continue
        break
    return {'gt':g,'solutions':l}
def ra_wrapper_preset(fold, glist=[]):
    """Like ra_wrapper, but takes the ground-truth graph from glist[fold].

    NOTE(review): glist=[] is a mutable default; it is only read here,
    but callers should still always pass glist explicitly.
    """
    scipy.random.seed()
    l = {}
    while True:
        try:
            g = glist[fold]
            gs= bfutils.call_undersamples(g)
            for u in range(1,min([len(gs),UMAX])):
                g2 = bfutils.undersample(g,u)
                print fold,': ',traversal.density(g),':',
                startTime = int(round(time.time() * 1000))
                s = ur.liteqclass(g2, verbose=False, capsize=CAPSIZE)
                endTime = int(round(time.time() * 1000))
                print len(s), u
                l[u] = {'eq':s,'ms':endTime-startTime}
        except MemoryError:
            print 'memory error... retrying'
            continue
        break
    return {'gt':g,'solutions':l}
def killall(l):
    """Reap the process list l as soon as any one member finishes.

    Polls each process with a tiny join timeout; once one is no longer
    alive, terminates and joins every other process and returns True.
    Returns False if all processes are still running.
    """
    for e in l:
        e.join(timeout=0.001)
        if not e.is_alive():
            #print 'first result'
            for p in l:
                if p != e:
                    #print 'terminating ', p.name
                    p.terminate()
                    p.join()
                else:
                    p.join()
            return True
    return False
def fan_wrapper(fold,n=10,k=10):
    """Race INPNUM searches on one random graph; return the first result.

    Generates a random ring graph, undersamples it via increment_u, and
    launches INPNUM worker processes running the same backtracking
    search; killall() terminates the losers once one worker has put its
    result on the queue.  The whole attempt restarts on MemoryError.
    """
    scipy.random.seed()
    curr_proc=current_process()
    curr_proc.daemon=False
    output = Queue()
    while True:
        try:
            g = bfutils.ringmore(n,k)
            gdens = traversal.density(g)
            g2 = bfutils.increment_u(g,g)
            #g2 = bfutils.undersample(g,2)
            def inside_wrapper():
                # One racer: reseed so each process explores differently.
                scipy.random.seed()
                try:
                    startTime = int(round(time.time() * 1000))
                    #s = traversal.v2g22g1(g2, capsize=CAPSIZE)
                    s = traversal.backtrack_more2(g2, rate=2, capsize=CAPSIZE)
                    endTime = int(round(time.time() * 1000))
                    print "{:2}: {:8} : {:4} {:10} seconds".\
                        format(fold, round(gdens,3), len(s),
                               round((endTime-startTime)/1000.,3))
                    output.put({'gt':g,'eq':s,'ms':endTime-startTime})
                except MemoryError:
                    print 'memory error...'
                    raise
            pl = [Process(target=inside_wrapper) for x in range(INPNUM)]
            for e in pl: e.start()
            while True:
                if killall(pl): break
            r = output.get()
        except MemoryError:
            print 'memory error... retrying'
            for p in pl:
                p.terminate()
                p.join()
            continue
        break
    for p in pl: p.join()
    return r
# Edge densities to sweep for each graph size (number of nodes).
densities = {5: [0.2],
             6: [0.2, .25, .3],
             7: [0.2, .25, .3],
             8: [0.15, 0.2, 0.25, 0.3],
             9: [.2],
             10:[.2],
             15:[0.2],
             20:[0.1],# 0.15, 0.2, 0.25, 0.3],
             25:[0.1],
             30:[0.1],
             35:[0.1],
             40:[0.1],
             50:[0.05, 0.1],
             60:[0.05, 0.1]}
for nodes in [5]:
z = {}
#pool=Pool(processes=PNUM)
for dens in densities[nodes]:
print "{:2}: {:8} : {:10} : {:10} {:10}".format('id', 'densityi(G)', 'density(H)', 'eq class', 'time')
e = bfutils.dens2edgenum(dens, n=nodes)
eqclasses = []
for x in pool.imap(functools.partial(ra_wrapper, n=nodes, k=e),
range(REPEATS)):
eqclasses.append(x)
z[dens] = eqclasses
zkl.save(eqclasses,
socket.gethostname().split('.')[0]+\
'_nodes_'+str(nodes)+'_density_'+\
str(dens)+'_'+KEY+'_.zkl')
print ''
print '----'
print ''
#pool.close()
#pool.join()
#zkl.save(z,socket.gethostname().split('.')[0]+'_nodes_'+str(nodes)+'_'+KEY+'_.zkl')
| gpl-3.0 |
BMJHayward/django | django/contrib/gis/db/backends/postgis/models.py | 396 | 2158 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PostGISGeometryColumns(models.Model):
    """
    The 'geometry_columns' table from the PostGIS. See the PostGIS
    documentation at Ch. 4.3.2.

    On PostGIS 2, this is a view.
    """
    f_table_catalog = models.CharField(max_length=256)
    f_table_schema = models.CharField(max_length=256)
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    type = models.CharField(max_length=30)
    class Meta:
        app_label = 'gis'
        db_table = 'geometry_columns'
        # The table/view is owned by PostGIS itself; Django must not
        # create or migrate it.
        managed = False
    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the feature table
        name.
        """
        return 'f_table_name'
    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the feature
        geometry column.
        """
        return 'f_geometry_column'
    def __str__(self):
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    srtext = models.CharField(max_length=2048)
    proj4text = models.CharField(max_length=2048)
    class Meta:
        app_label = 'gis'
        db_table = 'spatial_ref_sys'
        # Owned by PostGIS; Django must not create or migrate it.
        managed = False
    @property
    def wkt(self):
        # WKT representation lives in the srtext column.
        return self.srtext
    @classmethod
    def wkt_col(cls):
        """Return the name of the column holding the WKT spatial reference."""
        return 'srtext'
apocquet/django | django/contrib/gis/geos/base.py | 437 | 1280 | from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
class GEOSBase(object):
    """
    Common base for GEOS-backed objects.

    Wraps the raw C pointer behind a ``ptr`` property so that any attempt
    to read a NULL pointer raises immediately instead of being passed to
    the GEOS C library.
    """
    # The pointer starts out unset (NULL).
    _ptr = None
    # ctypes pointer type that instances are allowed to carry.
    ptr_type = c_void_p
    def _get_ptr(self):
        # Refuse to hand out a NULL pointer -- passing one to a GEOS
        # routine would be very bad.
        ptr = self._ptr
        if not ptr:
            raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
        return ptr
    def _set_ptr(self, ptr):
        # Accept only None (NULL) or a pointer of the expected ctypes type.
        if not (ptr is None or isinstance(ptr, self.ptr_type)):
            raise TypeError('Incompatible pointer type')
        self._ptr = ptr
    # Guarded access to the underlying GEOS pointer: reading raises on
    # NULL, writing type-checks, preventing the C library from touching
    # an invalid memory location.
    ptr = property(_get_ptr, _set_ptr)
Numel2020/NUMEL-Transitions | node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py | 1569 | 23354 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
  """Run the MacTool command named by args and exit with its status."""
  exit_code = MacTool().Dispatch(args)
  if exit_code is not None:
    sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
  def ExecCopyBundleResource(self, source, dest, convert_to_binary):
    """Copies a resource file to the bundle/Resources directory, performing any
    necessary compilation on each resource."""
    extension = os.path.splitext(source)[1].lower()
    if os.path.isdir(source):
      # Copy tree.
      # TODO(thakis): This copies file attributes like mtime, while the
      # single-file branch below doesn't. This should probably be changed to
      # be consistent with the single-file branch.
      if os.path.exists(dest):
        shutil.rmtree(dest)
      shutil.copytree(source, dest)
    elif extension == '.xib':
      return self._CopyXIBFile(source, dest)
    elif extension == '.storyboard':
      # Storyboards are compiled with the same ibtool invocation as xibs.
      return self._CopyXIBFile(source, dest)
    elif extension == '.strings':
      self._CopyStringsFile(source, dest, convert_to_binary)
    else:
      # Plain resource: byte-for-byte copy.
      shutil.copy(source, dest)
  def _CopyXIBFile(self, source, dest):
    """Compiles a XIB file with ibtool into a binary plist in the bundle."""
    # ibtool sometimes crashes with relative paths. See crbug.com/314728.
    base = os.path.dirname(os.path.realpath(__file__))
    if os.path.relpath(source):
      source = os.path.join(base, source)
    if os.path.relpath(dest):
      dest = os.path.join(base, dest)
    args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
        '--output-format', 'human-readable-text', '--compile', dest, source]
    # Filter ibtool's noisy output: drop "is clipping its content" notes
    # and only emit a section header when a line under it survives.
    ibtool_section_re = re.compile(r'/\*.*\*/')
    ibtool_re = re.compile(r'.*note:.*is clipping its content')
    ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
    current_section_header = None
    for line in ibtoolout.stdout:
      if ibtool_section_re.match(line):
        current_section_header = line
      elif not ibtool_re.match(line):
        if current_section_header:
          sys.stdout.write(current_section_header)
          current_section_header = None
        sys.stdout.write(line)
    return ibtoolout.returncode
  def _ConvertToBinary(self, dest):
    """Converts the plist at |dest| to Apple binary plist format in place."""
    subprocess.check_call([
        'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
  def _CopyStringsFile(self, source, dest, convert_to_binary):
    """Copies a .strings file using iconv to reconvert the input into UTF-16."""
    input_code = self._DetectInputEncoding(source) or "UTF-8"
    # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
    # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
    #     CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
    #     semicolon in dictionary.
    # on invalid files. Do the same kind of validation.
    import CoreFoundation
    s = open(source, 'rb').read()
    d = CoreFoundation.CFDataCreate(None, s, len(s))
    _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
    if error:
      # Invalid plist: silently skip the copy, matching Xcode's behavior.
      return
    fp = open(dest, 'wb')
    fp.write(s.decode(input_code).encode('UTF-16'))
    fp.close()
    if convert_to_binary == 'True':
      self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
  def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
    """Copies the |source| Info.plist to the destination directory |dest|,
    expanding ${VAR}-style environment references and synthesizing extra
    keys, then writes the matching PkgInfo file."""
    # Read the source Info.plist into memory.
    fd = open(source, 'r')
    lines = fd.read()
    fd.close()
    # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
    plist = plistlib.readPlistFromString(lines)
    if keys:
      plist = dict(plist.items() + json.loads(keys[0]).items())
    lines = plistlib.writePlistToString(plist)
    # Go through all the environment variables and replace them as variables in
    # the file.
    IDENT_RE = re.compile(r'[/\s]')
    for key in os.environ:
      if key.startswith('_'):
        continue
      evar = '${%s}' % key
      evalue = os.environ[key]
      lines = string.replace(lines, evar, evalue)
      # Xcode supports various suffices on environment variables, which are
      # all undocumented. :rfc1034identifier is used in the standard project
      # template these days, and :identifier was used earlier. They are used to
      # convert non-url characters into things that look like valid urls --
      # except that the replacement character for :identifier, '_' isn't valid
      # in a URL either -- oops, hence :rfc1034identifier was born.
      evar = '${%s:identifier}' % key
      evalue = IDENT_RE.sub('_', os.environ[key])
      lines = string.replace(lines, evar, evalue)
      evar = '${%s:rfc1034identifier}' % key
      evalue = IDENT_RE.sub('-', os.environ[key])
      lines = string.replace(lines, evar, evalue)
    # Remove any keys with values that haven't been replaced.
    lines = lines.split('\n')
    for i in range(len(lines)):
      if lines[i].strip().startswith("<string>${"):
        # Drop both the unresolved <string> value and its <key> line above.
        lines[i] = None
        lines[i - 1] = None
    lines = '\n'.join(filter(lambda x: x is not None, lines))
    # Write out the file with variables replaced.
    fd = open(dest, 'w')
    fd.write(lines)
    fd.close()
    # Now write out PkgInfo file now that the Info.plist file has been
    # "compiled".
    self._WritePkgInfo(dest)
    if convert_to_binary == 'True':
      self._ConvertToBinary(dest)
  def _WritePkgInfo(self, info_plist):
    """This writes the PkgInfo file from the data stored in Info.plist."""
    plist = plistlib.readPlist(info_plist)
    if not plist:
      return
    # Only create PkgInfo for executable types.
    package_type = plist['CFBundlePackageType']
    if package_type != 'APPL':
      return
    # The format of PkgInfo is eight characters, representing the bundle type
    # and bundle signature, each four characters. If that is missing, four
    # '?' characters are used instead.
    signature_code = plist.get('CFBundleSignature', '????')
    if len(signature_code) != 4:  # Wrong length resets everything, too.
      signature_code = '?' * 4
    # PkgInfo lives next to Info.plist inside the bundle.
    dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
    fp = open(dest, 'w')
    fp.write('%s%s' % (package_type, signature_code))
    fp.close()
def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1).

    Opens (creating if necessary) |lockfile|, takes an exclusive lock on
    it, then runs |cmd_list| and returns its exit status. Errors are
    reported through exceptions rather than handled explicitly.
    """
    open_flags = os.O_RDONLY | os.O_NOCTTY | os.O_CREAT
    lock_fd = os.open(lockfile, open_flags, 0o666)
    fcntl.flock(lock_fd, fcntl.LOCK_EX)
    return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
    """Calls libtool and filters out '/path/to/libtool: file: foo.o has no
    symbols'."""
    # Noise emitted for object files that define no symbols.
    libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
    # Noise emitted when the whole archive exports no global symbols.
    libtool_re5 = re.compile(
        r'^.*libtool: warning for library: ' +
        r'.* the table of contents is empty ' +
        r'\(no object file members in the library define global symbols\)$')
    env = os.environ.copy()
    # Ref:
    # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
    # The problem with this flag is that it resets the file mtime on the file to
    # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
    env['ZERO_AR_DATE'] = '1'
    libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
    _, err = libtoolout.communicate()
    # Re-emit only the stderr lines that are not known noise.
    for line in err.splitlines():
        if not libtool_re.match(line) and not libtool_re5.match(line):
            print >>sys.stderr, line
    # Unconditionally touch the output .a file on the command line if present
    # and the command succeeded. A bit hacky.
    # (ZERO_AR_DATE zeroed the archive's timestamp; restore a current mtime
    # so downstream dependency checking still works.)
    if not libtoolout.returncode:
        for i in range(len(cmd_list) - 1):
            if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
                os.utime(cmd_list[i+1], None)
                break
    return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
    """Takes a path to Something.framework and the Current version of that and
    sets up all the symlinks."""
    # The binary is named after the part before ".framework".
    binary = os.path.basename(framework).split('.')[0]
    versions_dir = 'Versions'
    current_link = os.path.join(versions_dir, 'Current')
    if not os.path.exists(os.path.join(framework, versions_dir, version, binary)):
        # Binary-less frameworks don't seem to contain symlinks (see e.g.
        # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
        return
    # The symlinks are relative, so create them from inside the framework.
    saved_cwd = os.getcwd()
    os.chdir(framework)
    # Versions/Current -> <version>
    self._Relink(version, current_link)
    # Root-level links into the current version.
    self._Relink(os.path.join(current_link, binary), binary)
    self._Relink(os.path.join(current_link, 'Resources'), 'Resources')
    # Back to where we were before!
    os.chdir(saved_cwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
    """Compiles multiple .xcassets files into a single .car file.

    This invokes 'actool' to compile all the inputs .xcassets files. The
    |keys| arguments is a json-encoded dictionary of extra arguments to
    pass to 'actool' when the asset catalogs contains an application icon
    or a launch image.

    Note that 'actool' does not create the Assets.car file if the asset
    catalogs does not contains imageset.
    """
    command_line = [
        'xcrun', 'actool', '--output-format', 'human-readable-text',
        '--compress-pngs', '--notices', '--warnings', '--errors',
    ]
    is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
    if is_iphone_target:
        # CONFIGURATION is e.g. 'Debug-iphoneos'; the suffix after the last
        # dash names the platform.
        platform = os.environ['CONFIGURATION'].split('-')[-1]
        if platform not in ('iphoneos', 'iphonesimulator'):
            platform = 'iphonesimulator'
        command_line.extend([
            '--platform', platform, '--target-device', 'iphone',
            '--target-device', 'ipad', '--minimum-deployment-target',
            os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
            os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
        ])
    else:
        command_line.extend([
            '--platform', 'macosx', '--target-device', 'mac',
            '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
            '--compile',
            os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
        ])
    if keys:
        keys = json.loads(keys)
        # items() instead of iteritems(): identical behavior on Python 2,
        # and also works on Python 3 (iteritems() was removed there).
        for key, value in keys.items():
            arg_name = '--' + key
            if isinstance(value, bool):
                # Boolean extras are flags: present when true, absent when
                # false.
                if value:
                    command_line.append(arg_name)
            elif isinstance(value, list):
                # Lists repeat the flag once per element.
                for v in value:
                    command_line.append(arg_name)
                    command_line.append(str(v))
            else:
                command_line.append(arg_name)
                command_line.append(str(value))
    # Note: actool crashes if inputs path are relative, so use os.path.abspath
    # to get absolute path name for inputs.
    command_line.extend(map(os.path.abspath, inputs))
    subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
    """Merge multiple .plist files into a single .plist file."""
    merged = {}
    # Later inputs extend/override earlier ones via the recursive merge.
    for plist_path in inputs:
        self._MergePlist(merged, self._LoadPlistMaybeBinary(plist_path))
    plistlib.writePlist(merged, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
    """Code sign a bundle.

    This function tries to code sign an iOS bundle, following the same
    algorithm as Xcode:
      1. copy ResourceRules.plist from the user or the SDK into the bundle,
      2. pick the provisioning profile that best match the bundle identifier,
         and copy it into the bundle as embedded.mobileprovision,
      3. copy Entitlements.plist from user or SDK next to the bundle,
      4. code sign the bundle.
    """
    rules_path = self._InstallResourceRules(resource_rules)
    substitutions, overrides = self._InstallProvisioningProfile(
        provisioning, self._GetCFBundleIdentifier())
    entitlements_path = self._InstallEntitlements(
        entitlements, substitutions, overrides)
    product_path = os.path.join(
        os.environ['TARGET_BUILD_DIR'], os.environ['FULL_PRODUCT_NAME'])
    command = [
        'codesign', '--force', '--sign', key,
        '--resource-rules', rules_path,
        '--entitlements', entitlements_path,
        product_path,
    ]
    subprocess.check_call(command)
def _InstallResourceRules(self, resource_rules):
    """Installs ResourceRules.plist from user or SDK into the bundle.

    Args:
      resource_rules: string, optional, path to the ResourceRules.plist file
        to use, default to "${SDKROOT}/ResourceRules.plist"

    Returns:
      Path to the copy of ResourceRules.plist into the bundle.
    """
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['CONTENTS_FOLDER_PATH'],
        'ResourceRules.plist')
    # Fall back to the SDK's default rules when the caller supplied none
    # (None or empty string).
    source_path = resource_rules or os.path.join(
        os.environ['SDKROOT'], 'ResourceRules.plist')
    shutil.copy2(source_path, target_path)
    return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
    """Installs embedded.mobileprovision into the bundle.

    Args:
      profile: string, optional, short name of the .mobileprovision file
        to use, if empty or the file is missing, the best file installed
        will be used
      bundle_identifier: string, value of CFBundleIdentifier from Info.plist

    Returns:
      A tuple containing two dictionary: variables substitutions and values
      to overrides when generating the entitlements file.
    """
    found = self._FindProvisioningProfile(profile, bundle_identifier)
    source_path, provisioning_data, team_id = found
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['CONTENTS_FOLDER_PATH'],
        'embedded.mobileprovision')
    shutil.copy2(source_path, target_path)
    # Entitlement substitutions use "<team id>." as AppIdentifierPrefix.
    substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
    return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
    """Finds the .mobileprovision file to use for signing the bundle.

    Checks all the installed provisioning profiles (or if the user specified
    the PROVISIONING_PROFILE variable, only consult it) and select the most
    specific that correspond to the bundle identifier.

    Args:
      profile: string, optional, short name of the .mobileprovision file
        to use, if empty or the file is missing, the best file installed
        will be used
      bundle_identifier: string, value of CFBundleIdentifier from Info.plist

    Returns:
      A tuple of the path to the selected provisioning profile, the data of
      the embedded plist in the provisioning profile and the team identifier
      to use for code signing.

    Raises:
      SystemExit: if no .mobileprovision can be used to sign the bundle.
    """
    # Xcode installs downloaded profiles in this per-user directory.
    profiles_dir = os.path.join(
        os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
    if not os.path.isdir(profiles_dir):
        print >>sys.stderr, (
            'cannot find mobile provisioning for %s' % bundle_identifier)
        sys.exit(1)
    provisioning_profiles = None
    if profile:
        # An explicitly requested profile is honored only if it exists...
        profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
        if os.path.exists(profile_path):
            provisioning_profiles = [profile_path]
    if not provisioning_profiles:
        # ...otherwise every installed profile is considered.
        provisioning_profiles = glob.glob(
            os.path.join(profiles_dir, '*.mobileprovision'))
    # Maps application-identifier pattern -> (path, plist data, team id)
    # for each profile whose pattern matches this bundle identifier.
    valid_provisioning_profiles = {}
    for profile_path in provisioning_profiles:
        profile_data = self._LoadProvisioningProfile(profile_path)
        app_id_pattern = profile_data.get(
            'Entitlements', {}).get('application-identifier', '')
        for team_identifier in profile_data.get('TeamIdentifier', []):
            # Full app id is "<team id>.<bundle id>"; the profile pattern
            # may contain wildcards, hence fnmatch.
            app_id = '%s.%s' % (team_identifier, bundle_identifier)
            if fnmatch.fnmatch(app_id, app_id_pattern):
                valid_provisioning_profiles[app_id_pattern] = (
                    profile_path, profile_data, team_identifier)
    if not valid_provisioning_profiles:
        print >>sys.stderr, (
            'cannot find mobile provisioning for %s' % bundle_identifier)
        sys.exit(1)
    # If the user has multiple provisioning profiles installed that can be
    # used for ${bundle_identifier}, pick the most specific one (ie. the
    # provisioning profile whose pattern is the longest).
    selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
    return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
    """Extracts the plist embedded in a provisioning profile.

    Args:
      profile_path: string, path to the .mobileprovision file

    Returns:
      Content of the plist embedded in the provisioning profile as a
      dictionary.
    """
    # A .mobileprovision is a CMS (PKCS#7) envelope; 'security cms -D'
    # decodes it into the plist it wraps.
    with tempfile.NamedTemporaryFile() as decoded:
        subprocess.check_call(
            ['security', 'cms', '-D', '-i', profile_path, '-o', decoded.name])
        return self._LoadPlistMaybeBinary(decoded.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
    """Loads into a memory a plist possibly encoded in binary format.

    This is a wrapper around plistlib.readPlist that tries to convert the
    plist to the XML format if it can't be parsed (assuming that it is in
    the binary format).

    Args:
      plist_path: string, path to a plist file, in XML or binary format

    Returns:
      Content of the plist as a dictionary.
    """
    try:
        # First, try to read the file using plistlib that only supports XML,
        # and if an exception is raised, convert a temporary copy to XML and
        # load that copy.
        return plistlib.readPlist(plist_path)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; any parse error falls through to the binary
        # conversion path below.
        pass
    with tempfile.NamedTemporaryFile() as temp:
        shutil.copy2(plist_path, temp.name)
        # 'plutil' rewrites the copy in place as XML.
        subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
        return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
    """Extracts CFBundleIdentifier value from Info.plist in the bundle.

    Returns:
      Value of CFBundleIdentifier in the Info.plist located in the bundle.
    """
    plist_path = os.path.join(
        os.environ['TARGET_BUILD_DIR'], os.environ['INFOPLIST_PATH'])
    return self._LoadPlistMaybeBinary(plist_path)['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
    """Generates and install the ${BundleName}.xcent entitlements file.

    Expands variables "$(variable)" pattern in the source entitlements file,
    add extra entitlements defined in the .mobileprovision file and the copy
    the generated plist to "${BundlePath}.xcent".

    Args:
      entitlements: string, optional, path to the Entitlements.plist template
        to use, defaults to "${SDKROOT}/Entitlements.plist"
      substitutions: dictionary, variable substitutions
      overrides: dictionary, values to add to the entitlements

    Returns:
      Path to the generated entitlements file.
    """
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['PRODUCT_NAME'] + '.xcent')
    # Default to the SDK's entitlements template when none was supplied
    # (None or empty string).
    source_path = entitlements or os.path.join(
        os.environ['SDKROOT'], 'Entitlements.plist')
    shutil.copy2(source_path, target_path)
    data = self._ExpandVariables(
        self._LoadPlistMaybeBinary(target_path), substitutions)
    if overrides:
        # Only add keys that the template does not already define.
        for key, value in overrides.items():
            data.setdefault(key, value)
    plistlib.writePlist(data, target_path)
    return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
# Script entry point: forward the command-line arguments (minus the program
# name) to main() and exit with its return code.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| mit |
makinacorpus/django | django/contrib/auth/middleware.py | 158 | 4108 | from django.contrib import auth
from django.contrib.auth import load_backend
from django.contrib.auth.backends import RemoteUserBackend
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
def get_user(request):
    """Return the user for |request|, resolving it at most once.

    The first call delegates to auth.get_user() and caches the result on
    the request object; later calls reuse the cached value.
    """
    try:
        return request._cached_user
    except AttributeError:
        request._cached_user = auth.get_user(request)
        return request._cached_user
class AuthenticationMiddleware(object):
    """Attaches a lazily-resolved ``user`` attribute to every request."""

    def process_request(self, request):
        # Session support must already be set up by an earlier middleware;
        # fail loudly (with setup instructions) otherwise.
        assert hasattr(request, 'session'), "The Django authentication middleware requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."

        def resolve_user():
            return get_user(request)

        # The user is only looked up when request.user is first touched.
        request.user = SimpleLazyObject(resolve_user)
class RemoteUserMiddleware(object):
    """
    Middleware for utilizing Web-server-provided authentication.

    If request.user is not authenticated, then this middleware attempts to
    authenticate the username passed in the ``REMOTE_USER`` request header.
    If authentication is successful, the user is automatically logged in to
    persist the user in the session.

    The header used is configurable and defaults to ``REMOTE_USER``. Subclass
    this class and change the ``header`` attribute if you need to use a
    different header.
    """

    # Name of request header to grab username from. This will be the key as
    # used in the request.META dictionary, i.e. the normalization of headers to
    # all uppercase and the addition of "HTTP_" prefix apply.
    header = "REMOTE_USER"

    def process_request(self, request):
        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise ImproperlyConfigured(
                "The Django remote user auth middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE_CLASSES setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the RemoteUserMiddleware class.")
        try:
            username = request.META[self.header]
        except KeyError:
            # If specified header doesn't exist then remove any existing
            # authenticated remote-user, or return (leaving request.user set to
            # AnonymousUser by the AuthenticationMiddleware).
            if request.user.is_authenticated():
                try:
                    stored_backend = load_backend(request.session.get(
                        auth.BACKEND_SESSION_KEY, ''))
                    if isinstance(stored_backend, RemoteUserBackend):
                        auth.logout(request)
                except ImproperlyConfigured:
                    # Backend failed to load; the exception carries no state
                    # we use (the previous unused ``as e`` binding was
                    # dropped), so just log the user out defensively.
                    auth.logout(request)
            return
        # If the user is already authenticated and that user is the user we are
        # getting passed in the headers, then the correct user is already
        # persisted in the session and we don't need to continue.
        if request.user.is_authenticated():
            if request.user.get_username() == self.clean_username(username, request):
                return
        # We are seeing this user for the first time in this session, attempt
        # to authenticate the user.
        user = auth.authenticate(remote_user=username)
        if user:
            # User is valid. Set request.user and persist user in the session
            # by logging the user in.
            request.user = user
            auth.login(request, user)

    def clean_username(self, username, request):
        """
        Allows the backend to clean the username, if the backend defines a
        clean_username method.
        """
        backend_str = request.session[auth.BACKEND_SESSION_KEY]
        backend = auth.load_backend(backend_str)
        try:
            username = backend.clean_username(username)
        except AttributeError:  # Backend has no clean_username method.
            pass
        return username
| bsd-3-clause |
istinspring/grab | grab/spider/task.py | 1 | 10517 | from __future__ import absolute_import
from datetime import datetime, timedelta
from grab.spider.error import SpiderMisuseError
from grab.base import copy_config
from grab.util.warning import warn
class BaseTask(object):
    """Common base (marker) class for all spider task objects."""
    pass
class Task(BaseTask):
    """
    Task for spider.
    """

    def __init__(self, name=None, url=None, grab=None, grab_config=None,
                 priority=None, priority_set_explicitly=True,
                 network_try_count=0, task_try_count=1,
                 disable_cache=False, refresh_cache=False,
                 valid_status=None, use_proxylist=True,
                 cache_timeout=None, delay=None,
                 raw=False, callback=None,
                 fallback_name=None,
                 **kwargs):
        """
        Create `Task` object.

        If more than one of url, grab and grab_config options are non-empty
        then they processed in following order:
        * grab overwrite grab_config
        * grab_config overwrite url

        Args:
            :param name: name of the task. After successful network operation
                task's result will be passed to `task_<name>` method.
            :param url: URL of network document. Any task requires `url` or
                `grab` option to be specified.
            :param grab: configured `Grab` instance. You can use that option in
                case when `url` option is not enough. Do not forget to
                configure `url` option of `Grab` instance because in this case
                the `url` option of `Task` constructor will be overwritten
                with `grab.config['url']`.
            :param priority: - priority of the Task. Tasks with lower priority
                will be processed earlier. By default each new task is assigned
                with random priority from (80, 100) range.
            :param priority_set_explicitly: - internal flag which tells if that
                task priority was assigned manually or generated by spider
                according to priority generation rules.
            :param network_try_count: you'll probably will not need to use it.
                It is used internally to control how many times this task was
                restarted due to network errors. The `Spider` instance has
                `network_try_limit` option. When `network_try_count` attribute
                of the task exceeds the `network_try_limit` attribute then
                processing of the task is abandoned.
            :param task_try_count: the as `network_try_count` but it increased
                only then you use `clone` method. Also you can set it manually.
                It is useful if you want to restart the task after it was
                cancelled due to multiple network errors. As you might guessed
                there is `task_try_limit` option in `Spider` instance. Both
                options `network_try_count` and `network_try_limit` guarantee
                you that you'll not get infinite loop of restarting some task.
            :param disable_cache: if `True` disable cache subsystem.
                The document will be fetched from the Network and it will not
                be saved to cache.
            :param refresh_cache: if `True` the document will be fetched from
                the Network and saved to cache.
            :param valid_status: extra status codes which counts as valid
            :param use_proxylist: it means to use proxylist which was
                configured via `setup_proxylist` method of spider
            :param delay: if specified tells the spider to schedule the task
                and execute it after `delay` seconds
            :param raw: if `raw` is True then the network response is
                forwarding to the corresponding handler without any check of
                HTTP status code of network error, if `raw` is False (by
                default) then failed response is putting back to task queue or
                if tries limit is reached then the processing of this request
                is finished.
            :param callback: if you pass some function in `callback` option
                then the network response will be passed to this callback and
                the usual 'task_*' handler will be ignored and no error will be
                raised if such 'task_*' handler does not exist.
            :param fallback_name: the name of method that is called when spider
                gives up to do the task (due to multiple network errors)

        Any non-standard named arguments passed to `Task` constructor will
        be saved as attributes of the object. You can get their values
        later as attributes or with `get` method which allows to use
        default value if attribute does not exist.
        """
        if name == 'generator':
            # The name "generator" is restricted because
            # `task_generator` handler could not be created because
            # this name is already used for special method which
            # generates new tasks
            raise SpiderMisuseError('Task name could not be "generator"')
        self.name = name
        # Exactly one of url/grab/grab_config must describe the request;
        # the checks below reject missing or conflicting combinations.
        if url is None and grab is None and grab_config is None:
            raise SpiderMisuseError('Either url, grab or grab_config argument '
                                    'of Task constructor should not be None')
        if url is not None and grab is not None:
            raise SpiderMisuseError('Options url and grab could not be used '
                                    'together')
        if url is not None and grab_config is not None:
            raise SpiderMisuseError('Options url and grab_config could not be '
                                    'used together')
        if grab is not None and grab_config is not None:
            raise SpiderMisuseError(
                'Options grab and grab_config could not be used together')
        if grab:
            # `grab` wins: store a private copy of its config (also sets
            # self.url from the config).
            self.setup_grab_config(grab.dump_config())
        elif grab_config:
            self.setup_grab_config(grab_config)
        else:
            # Plain-url task: no grab config attached.
            self.grab_config = None
            self.url = url
        if valid_status is None:
            self.valid_status = []
        else:
            self.valid_status = valid_status
        # Converts `delay` seconds into an absolute UTC schedule_time.
        self.process_delay_option(delay)
        self.cache_timeout = cache_timeout
        if cache_timeout is not None:
            warn(
                'Option `cache_timeout` is deprecated and'
                ' is not supported anymore'
            )
        self.fallback_name = fallback_name
        self.priority_set_explicitly = priority_set_explicitly
        self.priority = priority
        self.network_try_count = network_try_count
        self.task_try_count = task_try_count
        self.disable_cache = disable_cache
        self.refresh_cache = refresh_cache
        self.use_proxylist = use_proxylist
        self.raw = raw
        self.callback = callback
        self.coroutines_stack = []
        # Any extra keyword arguments become plain attributes of the task.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def get(self, key, default=None):
        """
        Return value of attribute or None if such attribute
        does not exist.
        """
        return getattr(self, key, default)

    def process_delay_option(self, delay):
        # A truthy delay schedules the task in the future (UTC clock);
        # otherwise the task is runnable immediately.
        if delay:
            self.schedule_time = datetime.utcnow() + timedelta(seconds=delay)
        else:
            self.schedule_time = None

    def setup_grab_config(self, grab_config):
        # Keep a private copy so later mutation of the caller's config
        # cannot affect this task; the task URL mirrors the config URL.
        self.grab_config = copy_config(grab_config)
        self.url = grab_config['url']

    def clone(self, **kwargs):
        """
        Clone Task instance.

        Reset network_try_count, increase task_try_count.
        Reset priority attribute if it was not set explicitly.
        """
        # First, create exact copy of the current Task object
        attr_copy = self.__dict__.copy()
        if attr_copy.get('grab_config') is not None:
            # url is derived from grab_config; dropping it avoids the
            # url+grab_config conflict check in the constructor.
            del attr_copy['url']
        if not attr_copy['priority_set_explicitly']:
            attr_copy['priority'] = None
        task = Task(**attr_copy)
        # Reset some task properties if they have not
        # been set explicitly in kwargs
        if 'network_try_count' not in kwargs:
            task.network_try_count = 0
        if 'task_try_count' not in kwargs:
            task.task_try_count = self.task_try_count + 1
        if 'refresh_cache' not in kwargs:
            task.refresh_cache = False
        if 'disable_cache' not in kwargs:
            task.disable_cache = False
        # The same mutual-exclusion rules as in the constructor apply to
        # the override arguments.
        if kwargs.get('url') is not None and kwargs.get('grab') is not None:
            raise SpiderMisuseError('Options url and grab could not be '
                                    'used together')
        if (kwargs.get('url') is not None and
                kwargs.get('grab_config') is not None):
            raise SpiderMisuseError('Options url and grab_config could not '
                                    'be used together')
        if (kwargs.get('grab') is not None and
                kwargs.get('grab_config') is not None):
            raise SpiderMisuseError('Options grab and grab_config could not '
                                    'be used together')
        if kwargs.get('grab'):
            task.setup_grab_config(kwargs['grab'].dump_config())
            del kwargs['grab']
        elif kwargs.get('grab_config'):
            task.setup_grab_config(kwargs['grab_config'])
            del kwargs['grab_config']
        elif kwargs.get('url'):
            task.url = kwargs['url']
            if task.grab_config:
                # Keep config URL in sync with the task URL.
                task.grab_config['url'] = kwargs['url']
            del kwargs['url']
        # Remaining keyword arguments override attributes on the clone.
        for key, value in kwargs.items():
            setattr(task, key, value)
        # A clone is meant to run immediately: drop any inherited delay.
        task.process_delay_option(None)
        return task

    def __repr__(self):
        return '<Task: %s>' % self.url

    def __lt__(self, other):
        # Lower priority value means processed earlier (priority queue).
        return self.priority < other.priority

    def __eq__(self, other):
        # Tasks with an unset (falsy) priority compare equal to anything,
        # which keeps queue ordering stable when priorities are missing.
        if not self.priority or not other.priority:
            return True
        else:
            return self.priority == other.priority

    def get_fallback_handler(self, spider):
        # An explicit fallback_name wins; otherwise look for the
        # conventional `task_<name>_fallback` method on the spider.
        # Returns None when no handler is available.
        if self.fallback_name:
            return getattr(spider, self.fallback_name)
        elif self.name:
            fb_name = 'task_%s_fallback' % self.name
            if hasattr(spider, fb_name):
                return getattr(spider, fb_name)
        else:
            return None
| mit |
VinceZK/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py | 127 | 2897 | # Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
class StyleQueueTaskDelegate(PatchAnalysisTaskDelegate):
    """Delegate wiring the patch-analysis machinery to the style queue."""

    def parent_command(self):
        # Name of the webkit-patch command on whose behalf tasks run.
        return "style-queue"
class StyleQueueTask(PatchAnalysisTask):
    """Patch-analysis task that runs the style checker over one patch."""

    def validate(self):
        # Re-fetch so the decision is based on the patch's latest state.
        patch = self._delegate.refetch_patch(self._patch)
        self._patch = patch
        # The patch is still worth checking only if it is current, its bug
        # is open, and it has not been rejected (review "-").
        return not (patch.is_obsolete()
                    or patch.bug().is_closed()
                    or patch.review() == "-")

    def _check_style(self):
        command = [
            "check-style-local",
            "--non-interactive",
            "--quiet",
        ]
        return self._run_command(
            command, "Style checked", "Patch did not pass style check")

    def _apply_watch_list(self):
        command = ["apply-watchlist-local", self._patch.bug_id()]
        return self._run_command(
            command, "Watchlist applied", "Unabled to apply watchlist")

    def run(self):
        # Preparing the working tree must succeed before anything else.
        if not (self._clean() and self._update()):
            return False
        if not self._apply():
            raise UnableToApplyPatch(self._patch)
        # Watchlist failures are deliberately non-fatal.
        self._apply_watch_list()
        return True if self._check_style() else self.report_failure()
| bsd-3-clause |
pli3/pli3-openpli3 | openembedded-core/meta/recipes-devtools/python/python-native/sitecustomize.py | 228 | 1125 | # OpenEmbedded sitecustomize.py (C) 2002-2008 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# GPLv2 or later
# Version: 20081123
# Features:
# * set proper default encoding
# * enable readline completion in the interactive interpreter
# * load command line history on startup
# * save command line history on exit
import os
def __exithandler():
    """Persist the interactive command history to ~/.python-history."""
    history_path = "%s/.python-history" % os.getenv("HOME", "/tmp")
    try:
        readline.write_history_file(history_path)
    except IOError:
        # History saving is best-effort; an unwritable target is fine.
        pass
def __registerExitHandler():
    """Hook __exithandler into interpreter shutdown via atexit."""
    import atexit
    atexit.register(__exithandler)
def __enableReadlineSupport():
readline.set_history_length( 1000 )
readline.parse_and_bind( "tab: complete" )
try:
readline.read_history_file( "%s/.python-history" % os.getenv( "HOME", "/tmp" ) )
except IOError:
pass
def __enableDefaultEncoding():
import sys
try:
sys.setdefaultencoding( "utf8" )
except LookupError:
pass
# Module body: activate the interactive niceties. rlcompleter/readline may
# be unavailable (e.g. minimal builds); in that case skip the setup quietly.
import sys
try:
    import rlcompleter, readline
except ImportError:
    pass
else:
    __enableDefaultEncoding()
    __registerExitHandler()
    __enableReadlineSupport()
| gpl-2.0 |
dgarciam/Sick-Beard | lib/imdb/__init__.py | 50 | 41019 | """
imdb package.
This package can be used to retrieve information about a movie or
a person from the IMDb database.
It can fetch data through different media (e.g.: the IMDb web pages,
a SQL database, etc.)
Copyright 2004-2012 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# Public API of the package.
__all__ = ['IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
           'available_access_systems']
__version__ = VERSION = '4.9'

# Import compatibility module (importing it is enough).
import _compat
import sys, os, ConfigParser, logging
from types import MethodType
from imdb import Movie, Person, Character, Company
import imdb._logging
from imdb._exceptions import IMDbError, IMDbDataAccessError, IMDbParserError
from imdb.utils import build_title, build_name, build_company_name

# Logger for auxiliary messages (e.g. configuration-file problems).
_aux_logger = logging.getLogger('imdbpy.aux')

# URLs of the main pages for movies, persons, characters and queries.
imdbURL_base = 'http://akas.imdb.com/'

# NOTE: the urls below will be removed in a future version.
#       please use the values in the 'urls' attribute
#       of the IMDbBase subclass instance.
# http://akas.imdb.com/title/
imdbURL_movie_base = '%stitle/' % imdbURL_base
# http://akas.imdb.com/title/tt%s/
imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
# http://akas.imdb.com/name/
imdbURL_person_base = '%sname/' % imdbURL_base
# http://akas.imdb.com/name/nm%s/
imdbURL_person_main = imdbURL_person_base + 'nm%s/'
# http://akas.imdb.com/character/
imdbURL_character_base = '%scharacter/' % imdbURL_base
# http://akas.imdb.com/character/ch%s/
imdbURL_character_main = imdbURL_character_base + 'ch%s/'
# http://akas.imdb.com/company/
imdbURL_company_base = '%scompany/' % imdbURL_base
# http://akas.imdb.com/company/co%s/
imdbURL_company_main = imdbURL_company_base + 'co%s/'
# http://akas.imdb.com/keyword/%s/
imdbURL_keyword_main = imdbURL_base + 'keyword/%s/'
# http://akas.imdb.com/chart/top
imdbURL_top250 = imdbURL_base + 'chart/top'
# http://akas.imdb.com/chart/bottom
imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
# http://akas.imdb.com/find?%s
imdbURL_find = imdbURL_base + 'find?%s'

# Name of the configuration file.
confFileName = 'imdbpy.cfg'
class ConfigParserWithCase(ConfigParser.ConfigParser):
    """A case-sensitive parser for configuration files.

    Option names keep their case (see optionxform), and values are
    post-processed: boolean-like strings become True/False and the
    string 'none' becomes None."""

    def __init__(self, defaults=None, confFile=None, *args, **kwds):
        """Initialize the parser.

        *defaults* -- defaults values.
        *confFile* -- the file (or list of files) to parse."""
        ConfigParser.ConfigParser.__init__(self, defaults=defaults)
        if confFile is None:
            dotFileName = '.' + confFileName
            # Current and home directory.
            confFile = [os.path.join(os.getcwd(), confFileName),
                        os.path.join(os.getcwd(), dotFileName),
                        os.path.join(os.path.expanduser('~'), confFileName),
                        os.path.join(os.path.expanduser('~'), dotFileName)]
            if os.name == 'posix':
                sep = getattr(os.path, 'sep', '/')
                # /etc/ and /etc/conf.d/
                confFile.append(os.path.join(sep, 'etc', confFileName))
                confFile.append(os.path.join(sep, 'etc', 'conf.d',
                                            confFileName))
            else:
                # etc subdirectory of sys.prefix, for non-unix systems.
                confFile.append(os.path.join(sys.prefix, 'etc', confFileName))
        elif isinstance(confFile, (str, unicode)):
            # A single file name was passed: iterating over it directly
            # would process it one character at a time; wrap it in a list.
            confFile = [confFile]
        for fname in confFile:
            try:
                self.read(fname)
            except (ConfigParser.MissingSectionHeaderError,
                    ConfigParser.ParsingError) as e:
                # "as e" (Python 2.6+) instead of the old ", e" syntax.
                _aux_logger.warn('Troubles reading config file: %s' % e)
            # Stop at the first valid file.
            if self.has_section('imdbpy'):
                break

    def optionxform(self, optionstr):
        """Option names are case sensitive."""
        return optionstr

    def _manageValue(self, value):
        """Custom substitutions for values: map boolean-like strings to
        True/False and 'none' to None; leave everything else untouched."""
        if not isinstance(value, (str, unicode)):
            return value
        vlower = value.lower()
        if vlower in self._boolean_states:
            return self._boolean_states[vlower]
        elif vlower == 'none':
            return None
        return value

    def get(self, section, option, *args, **kwds):
        """Return the value of an option from a given section."""
        value = ConfigParser.ConfigParser.get(self, section, option,
                                                *args, **kwds)
        return self._manageValue(value)

    def items(self, section, *args, **kwds):
        """Return a list of (key, value) tuples of items of the
        given section."""
        if section != 'DEFAULT' and not self.has_section(section):
            return []
        keys = ConfigParser.ConfigParser.options(self, section)
        return [(k, self.get(section, k, *args, **kwds)) for k in keys]

    def getDict(self, section):
        """Return a dictionary of items of the specified section."""
        return dict(self.items(section))
def IMDb(accessSystem=None, *arguments, **keywords):
    """Return an instance of the appropriate class.
    The accessSystem parameter is used to specify the kind of
    the preferred access system.

    When accessSystem is None, 'auto' or 'config', the configuration
    file is consulted; on any trouble the 'http' system is used."""
    if accessSystem is None or accessSystem in ('auto', 'config'):
        try:
            cfg_file = ConfigParserWithCase(*arguments, **keywords)
            # Parameters set by the code take precedence.
            kwds = cfg_file.getDict('imdbpy')
            if 'accessSystem' in kwds:
                accessSystem = kwds['accessSystem']
                del kwds['accessSystem']
            else:
                accessSystem = 'http'
            kwds.update(keywords)
            keywords = kwds
        except Exception as e:
            # "as e" (Python 2.6+) instead of the old ", e" syntax.
            logging.getLogger('imdbpy').warn('Unable to read configuration' \
                                            ' file; complete error: %s' % e)
            # It just LOOKS LIKE a bad habit: we tried to read config
            # options from some files, but something is gone horribly
            # wrong: ignore everything and pretend we were called with
            # the 'http' accessSystem.
            accessSystem = 'http'
    if 'loggingLevel' in keywords:
        imdb._logging.setLevel(keywords['loggingLevel'])
        del keywords['loggingLevel']
    if 'loggingConfig' in keywords:
        logCfg = keywords['loggingConfig']
        del keywords['loggingConfig']
        try:
            import logging.config
            logging.config.fileConfig(os.path.expanduser(logCfg))
        except Exception as e:
            logging.getLogger('imdbpy').warn('unable to read logger ' \
                                            'config: %s' % e)
    if accessSystem in ('httpThin', 'webThin', 'htmlThin'):
        # 'httpThin' aliases are silently mapped to plain 'http'.
        logging.warn('httpThin was removed since IMDbPY 4.8')
        accessSystem = 'http'
    if accessSystem in ('http', 'web', 'html'):
        from parser.http import IMDbHTTPAccessSystem
        return IMDbHTTPAccessSystem(*arguments, **keywords)
    elif accessSystem in ('mobile',):
        from parser.mobile import IMDbMobileAccessSystem
        return IMDbMobileAccessSystem(*arguments, **keywords)
    elif accessSystem in ('local', 'files'):
        # The local access system was removed since IMDbPY 4.2.
        raise IMDbError('the local access system was removed since IMDbPY 4.2')
    elif accessSystem in ('sql', 'db', 'database'):
        try:
            from parser.sql import IMDbSqlAccessSystem
        except ImportError:
            raise IMDbError('the sql access system is not installed')
        return IMDbSqlAccessSystem(*arguments, **keywords)
    else:
        raise IMDbError('unknown kind of data access system: "%s"' \
                        % accessSystem)
def available_access_systems():
    """Return the list of available data access systems.

    Availability is probed by trying to import each backend package;
    a backend that fails to import is simply left out."""
    # XXX: trying to import modules is a good thing?
    systems = []
    try:
        from parser.http import IMDbHTTPAccessSystem
    except ImportError:
        pass
    else:
        systems.append('http')
    try:
        from parser.mobile import IMDbMobileAccessSystem
    except ImportError:
        pass
    else:
        systems.append('mobile')
    try:
        from parser.sql import IMDbSqlAccessSystem
    except ImportError:
        pass
    else:
        systems.append('sql')
    return systems
# XXX: I'm not sure this is a good guess.
# I suppose that an argument of the IMDb function can be used to
# set a default encoding for the output, and then Movie, Person and
# Character objects can use this default encoding, returning strings.
# Anyway, passing unicode strings to search_movie(), search_person()
# and search_character() methods is always safer.
# Default encoding used to decode byte-string arguments; falls back to
# the interpreter default when stdin has no (or an empty) encoding.
encoding = getattr(sys.stdin, 'encoding', '') or sys.getdefaultencoding()
class IMDbBase:
    """The base class used to search for a movie/person/character and
    to get a Movie/Person/Character object.
    This class cannot directly fetch data of any kind and so you
    have to search the "real" code into a subclass."""
    # The name of the preferred access system (MUST be overridden
    # in the subclasses).
    accessSystem = 'UNKNOWN'
    # Top-level logger for IMDbPY.
    _imdb_logger = logging.getLogger('imdbpy')
    # Whether to re-raise caught exceptions or not (see update()).
    _reraise_exceptions = False
def __init__(self, defaultModFunct=None, results=20, keywordsResults=100,
*arguments, **keywords):
"""Initialize the access system.
If specified, defaultModFunct is the function used by
default by the Person, Movie and Character objects, when
accessing their text fields.
"""
# The function used to output the strings that need modification (the
# ones containing references to movie titles and person names).
self._defModFunct = defaultModFunct
# Number of results to get.
try:
results = int(results)
except (TypeError, ValueError):
results = 20
if results < 1:
results = 20
self._results = results
try:
keywordsResults = int(keywordsResults)
except (TypeError, ValueError):
keywordsResults = 100
if keywordsResults < 1:
keywordsResults = 100
self._keywordsResults = keywordsResults
self._reraise_exceptions = keywords.get('reraiseExceptions') or False
self.set_imdb_urls(keywords.get('imdbURL_base') or imdbURL_base)
def set_imdb_urls(self, imdbURL_base):
"""Set the urls used accessing the IMDb site."""
imdbURL_base = imdbURL_base.strip().strip('"\'')
if not imdbURL_base.startswith('http://'):
imdbURL_base = 'http://%s' % imdbURL_base
if not imdbURL_base.endswith('/'):
imdbURL_base = '%s/' % imdbURL_base
# http://akas.imdb.com/title/
imdbURL_movie_base='%stitle/' % imdbURL_base
# http://akas.imdb.com/title/tt%s/
imdbURL_movie_main=imdbURL_movie_base + 'tt%s/'
# http://akas.imdb.com/name/
imdbURL_person_base='%sname/' % imdbURL_base
# http://akas.imdb.com/name/nm%s/
imdbURL_person_main=imdbURL_person_base + 'nm%s/'
# http://akas.imdb.com/character/
imdbURL_character_base='%scharacter/' % imdbURL_base
# http://akas.imdb.com/character/ch%s/
imdbURL_character_main=imdbURL_character_base + 'ch%s/'
# http://akas.imdb.com/company/
imdbURL_company_base='%scompany/' % imdbURL_base
# http://akas.imdb.com/company/co%s/
imdbURL_company_main=imdbURL_company_base + 'co%s/'
# http://akas.imdb.com/keyword/%s/
imdbURL_keyword_main=imdbURL_base + 'keyword/%s/'
# http://akas.imdb.com/chart/top
imdbURL_top250=imdbURL_base + 'chart/top',
# http://akas.imdb.com/chart/bottom
imdbURL_bottom100=imdbURL_base + 'chart/bottom'
# http://akas.imdb.com/find?%s
imdbURL_find=imdbURL_base + 'find?%s'
self.urls = dict(
movie_base=imdbURL_movie_base,
movie_main=imdbURL_movie_main,
person_base=imdbURL_person_base,
person_main=imdbURL_person_main,
character_base=imdbURL_character_base,
character_main=imdbURL_character_main,
company_base=imdbURL_company_base,
company_main=imdbURL_company_main,
keyword_main=imdbURL_keyword_main,
top250=imdbURL_top250,
bottom100=imdbURL_bottom100,
find=imdbURL_find)
    def _normalize_movieID(self, movieID):
        """Normalize the given movieID.

        Subclasses may override this to map external IDs to their
        internal representation."""
        # By default, do nothing.
        return movieID

    def _normalize_personID(self, personID):
        """Normalize the given personID.

        Subclasses may override this to map external IDs to their
        internal representation."""
        # By default, do nothing.
        return personID

    def _normalize_characterID(self, characterID):
        """Normalize the given characterID.

        Subclasses may override this to map external IDs to their
        internal representation."""
        # By default, do nothing.
        return characterID

    def _normalize_companyID(self, companyID):
        """Normalize the given companyID.

        Subclasses may override this to map external IDs to their
        internal representation."""
        # By default, do nothing.
        return companyID
    def _get_real_movieID(self, movieID):
        """Handle title aliases.

        Subclasses may override this to resolve an alias to the ID of
        the canonical title."""
        # By default, do nothing.
        return movieID

    def _get_real_personID(self, personID):
        """Handle name aliases.

        Subclasses may override this to resolve an alias to the ID of
        the canonical name."""
        # By default, do nothing.
        return personID

    def _get_real_characterID(self, characterID):
        """Handle character name aliases.

        Subclasses may override this to resolve an alias to the ID of
        the canonical character name."""
        # By default, do nothing.
        return characterID

    def _get_real_companyID(self, companyID):
        """Handle company name aliases.

        Subclasses may override this to resolve an alias to the ID of
        the canonical company name."""
        # By default, do nothing.
        return companyID
def _get_infoset(self, prefname):
"""Return methods with the name starting with prefname."""
infoset = []
excludes = ('%sinfoset' % prefname,)
preflen = len(prefname)
for name in dir(self.__class__):
if name.startswith(prefname) and name not in excludes:
member = getattr(self.__class__, name)
if isinstance(member, MethodType):
infoset.append(name[preflen:].replace('_', ' '))
return infoset
    def get_movie_infoset(self):
        """Return the list of info set available for movies."""
        # Collect every get_movie_* accessor defined by the subclass.
        return self._get_infoset('get_movie_')

    def get_person_infoset(self):
        """Return the list of info set available for persons."""
        # Collect every get_person_* accessor defined by the subclass.
        return self._get_infoset('get_person_')

    def get_character_infoset(self):
        """Return the list of info set available for characters."""
        # Collect every get_character_* accessor defined by the subclass.
        return self._get_infoset('get_character_')

    def get_company_infoset(self):
        """Return the list of info set available for companies."""
        # Collect every get_company_* accessor defined by the subclass.
        return self._get_infoset('get_company_')
def get_movie(self, movieID, info=Movie.Movie.default_info, modFunct=None):
"""Return a Movie object for the given movieID.
The movieID is something used to univocally identify a movie;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Movie
object when accessing its text fields (like 'plot')."""
movieID = self._normalize_movieID(movieID)
movieID = self._get_real_movieID(movieID)
movie = Movie.Movie(movieID=movieID, accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
movie.set_mod_funct(modFunct)
self.update(movie, info)
return movie
get_episode = get_movie
    def _search_movie(self, title, results):
        """Return a list of tuples (movieID, {movieData}).

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
def search_movie(self, title, results=None, _episodes=False):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
# XXX: I suppose it will be much safer if the user provides
# an unicode string... this is just a guess.
if not isinstance(title, unicode):
title = unicode(title, encoding, 'replace')
if not _episodes:
res = self._search_movie(title, results)
else:
res = self._search_episode(title, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
    def _search_episode(self, title, results):
        """Return a list of tuples (movieID, {movieData}).

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
    def search_episode(self, title, results=None):
        """Return a list of Movie objects for a query for the given title.
        The results argument is the maximum number of results to return;
        this method searches only for titles of tv (mini) series' episodes."""
        # Plain delegation: search_movie does all the work.
        return self.search_movie(title, results=results, _episodes=True)
def get_person(self, personID, info=Person.Person.default_info,
modFunct=None):
"""Return a Person object for the given personID.
The personID is something used to univocally identify a person;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Person
object when accessing its text fields (like 'mini biography')."""
personID = self._normalize_personID(personID)
personID = self._get_real_personID(personID)
person = Person.Person(personID=personID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
person.set_mod_funct(modFunct)
self.update(person, info)
return person
    def _search_person(self, name, results):
        """Return a list of tuples (personID, {personData}).

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
def search_person(self, name, results=None):
"""Return a list of Person objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_person(name, results)
return [Person.Person(personID=self._get_real_personID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def get_character(self, characterID, info=Character.Character.default_info,
modFunct=None):
"""Return a Character object for the given characterID.
The characterID is something used to univocally identify a character;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Character
object when accessing its text fields (like 'biography')."""
characterID = self._normalize_characterID(characterID)
characterID = self._get_real_characterID(characterID)
character = Character.Character(characterID=characterID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
character.set_mod_funct(modFunct)
self.update(character, info)
return character
    def _search_character(self, name, results):
        """Return a list of tuples (characterID, {characterData}).

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
def search_character(self, name, results=None):
"""Return a list of Character objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_character(name, results)
return [Character.Character(characterID=self._get_real_characterID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def get_company(self, companyID, info=Company.Company.default_info,
modFunct=None):
"""Return a Company object for the given companyID.
The companyID is something used to univocally identify a company;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Company
object when accessing its text fields (none, so far)."""
companyID = self._normalize_companyID(companyID)
companyID = self._get_real_companyID(companyID)
company = Company.Company(companyID=companyID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
company.set_mod_funct(modFunct)
self.update(company, info)
return company
    def _search_company(self, name, results):
        """Return a list of tuples (companyID, {companyData}).

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
def search_company(self, name, results=None):
"""Return a list of Company objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_company(name, results)
return [Company.Company(companyID=self._get_real_companyID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
    def _search_keyword(self, keyword, results):
        """Return a list of 'keyword' strings.

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
def search_keyword(self, keyword, results=None):
"""Search for existing keywords, similar to the given one."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
if not isinstance(keyword, unicode):
keyword = unicode(keyword, encoding, 'replace')
return self._search_keyword(keyword, results)
    def _get_keyword(self, keyword, results):
        """Return a list of tuples (movieID, {movieData}).

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
def get_keyword(self, keyword, results=None):
"""Return a list of movies for the given keyword."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
# XXX: I suppose it will be much safer if the user provides
# an unicode string... this is just a guess.
if not isinstance(keyword, unicode):
keyword = unicode(keyword, encoding, 'replace')
res = self._get_keyword(keyword, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
    def _get_top_bottom_movies(self, kind):
        """Return the list of the top 250 or bottom 100 movies.

        Abstract: concrete data access systems must override it."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        #      This method must return a list of (movieID, {movieDict})
        #      tuples.  The kind parameter can be 'top' or 'bottom'.
        raise NotImplementedError('override this method')
    def get_top250_movies(self):
        """Return the list of the top 250 movies."""
        # Wrap every (movieID, data) tuple into a Movie object.
        res = self._get_top_bottom_movies('top')
        return [Movie.Movie(movieID=self._get_real_movieID(mi),
                            data=md, modFunct=self._defModFunct,
                            accessSystem=self.accessSystem) for mi, md in res]

    def get_bottom100_movies(self):
        """Return the list of the bottom 100 movies."""
        # Wrap every (movieID, data) tuple into a Movie object.
        res = self._get_top_bottom_movies('bottom')
        return [Movie.Movie(movieID=self._get_real_movieID(mi),
                            data=md, modFunct=self._defModFunct,
                            accessSystem=self.accessSystem) for mi, md in res]
def new_movie(self, *arguments, **keywords):
"""Return a Movie object."""
# XXX: not really useful...
if 'title' in keywords:
if not isinstance(keywords['title'], unicode):
keywords['title'] = unicode(keywords['title'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Movie.Movie(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_person(self, *arguments, **keywords):
"""Return a Person object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Person.Person(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_character(self, *arguments, **keywords):
"""Return a Character object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Character.Character(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_company(self, *arguments, **keywords):
"""Return a Company object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Company.Company(accessSystem=self.accessSystem,
*arguments, **keywords)
def update(self, mop, info=None, override=0):
"""Given a Movie, Person, Character or Company object with only
partial information, retrieve the required set of information.
info is the list of sets of information to retrieve.
If override is set, the information are retrieved and updated
even if they're already in the object."""
# XXX: should this be a method of the Movie/Person/Character/Company
# classes? NO! What for instances created by external functions?
mopID = None
prefix = ''
if isinstance(mop, Movie.Movie):
mopID = mop.movieID
prefix = 'movie'
elif isinstance(mop, Person.Person):
mopID = mop.personID
prefix = 'person'
elif isinstance(mop, Character.Character):
mopID = mop.characterID
prefix = 'character'
elif isinstance(mop, Company.Company):
mopID = mop.companyID
prefix = 'company'
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance')
if mopID is None:
# XXX: enough? It's obvious that there are Characters
# objects without characterID, so I think they should
# just do nothing, when an i.update(character) is tried.
if prefix == 'character':
return
raise IMDbDataAccessError( \
'the supplied object has null movieID, personID or companyID')
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if info is None:
info = mop.default_info
elif info == 'all':
if isinstance(mop, Movie.Movie):
info = self.get_movie_infoset()
elif isinstance(mop, Person.Person):
info = self.get_person_infoset()
elif isinstance(mop, Character.Character):
info = self.get_character_infoset()
else:
info = self.get_company_infoset()
if not isinstance(info, (tuple, list)):
info = (info,)
res = {}
for i in info:
if i in mop.current_info and not override:
continue
if not i:
continue
self._imdb_logger.debug('retrieving "%s" info set', i)
try:
method = getattr(aSystem, 'get_%s_%s' %
(prefix, i.replace(' ', '_')))
except AttributeError:
self._imdb_logger.error('unknown information set "%s"', i)
# Keeps going.
method = lambda *x: {}
try:
ret = method(mopID)
except Exception, e:
self._imdb_logger.critical('caught an exception retrieving ' \
'or parsing "%s" info set for mopID ' \
'"%s" (accessSystem: %s)',
i, mopID, mop.accessSystem, exc_info=True)
ret = {}
# If requested by the user, reraise the exception.
if self._reraise_exceptions:
raise
keys = None
if 'data' in ret:
res.update(ret['data'])
if isinstance(ret['data'], dict):
keys = ret['data'].keys()
if 'info sets' in ret:
for ri in ret['info sets']:
mop.add_to_current_info(ri, keys, mainInfoset=i)
else:
mop.add_to_current_info(i, keys)
if 'titlesRefs' in ret:
mop.update_titlesRefs(ret['titlesRefs'])
if 'namesRefs' in ret:
mop.update_namesRefs(ret['namesRefs'])
if 'charactersRefs' in ret:
mop.update_charactersRefs(ret['charactersRefs'])
mop.set_data(res, override=0)
    def get_imdbMovieID(self, movieID):
        """Translate a movieID in an imdbID (the ID used by the IMDb
        web server); must be overridden by the subclass."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')

    def get_imdbPersonID(self, personID):
        """Translate a personID in a imdbID (the ID used by the IMDb
        web server); must be overridden by the subclass."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')

    def get_imdbCharacterID(self, characterID):
        """Translate a characterID in a imdbID (the ID used by the IMDb
        web server); must be overridden by the subclass."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')

    def get_imdbCompanyID(self, companyID):
        """Translate a companyID in a imdbID (the ID used by the IMDb
        web server); must be overridden by the subclass."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError('override this method')
def _searchIMDb(self, kind, ton):
"""Search the IMDb akas server for the given title or name."""
# The Exact Primary search system has gone AWOL, so we resort
# to the mobile search. :-/
if not ton:
return None
aSystem = IMDb('mobile')
if kind == 'tt':
searchFunct = aSystem.search_movie
check = 'long imdb canonical title'
elif kind == 'nm':
searchFunct = aSystem.search_person
check = 'long imdb canonical name'
elif kind == 'char':
searchFunct = aSystem.search_character
check = 'long imdb canonical name'
elif kind == 'co':
# XXX: are [COUNTRY] codes included in the results?
searchFunct = aSystem.search_company
check = 'long imdb name'
try:
searchRes = searchFunct(ton)
except IMDbError:
return None
# When only one result is returned, assume it was from an
# exact match.
if len(searchRes) == 1:
return searchRes[0].getID()
for item in searchRes:
# Return the first perfect match.
if item[check] == ton:
return item.getID()
return None
def title2imdbID(self, title):
"""Translate a movie title (in the plain text data files format)
to an imdbID.
Try an Exact Primary Title search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('tt', title)
def name2imdbID(self, name):
"""Translate a person name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('tt', name)
def character2imdbID(self, name):
"""Translate a character name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('char', name)
def company2imdbID(self, name):
"""Translate a company name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('co', name)
def get_imdbID(self, mop):
"""Return the imdbID for the given Movie, Person, Character or Company
object."""
imdbID = None
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if isinstance(mop, Movie.Movie):
if mop.movieID is not None:
imdbID = aSystem.get_imdbMovieID(mop.movieID)
else:
imdbID = aSystem.title2imdbID(build_title(mop, canonical=0,
ptdf=1))
elif isinstance(mop, Person.Person):
if mop.personID is not None:
imdbID = aSystem.get_imdbPersonID(mop.personID)
else:
imdbID = aSystem.name2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Character.Character):
if mop.characterID is not None:
imdbID = aSystem.get_imdbCharacterID(mop.characterID)
else:
# canonical=0 ?
imdbID = aSystem.character2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Company.Company):
if mop.companyID is not None:
imdbID = aSystem.get_imdbCompanyID(mop.companyID)
else:
imdbID = aSystem.company2imdbID(build_company_name(mop))
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person or Character instance')
return imdbID
def get_imdbURL(self, mop):
"""Return the main IMDb URL for the given Movie, Person,
Character or Company object, or None if unable to get it."""
imdbID = self.get_imdbID(mop)
if imdbID is None:
return None
if isinstance(mop, Movie.Movie):
url_firstPart = imdbURL_movie_main
elif isinstance(mop, Person.Person):
url_firstPart = imdbURL_person_main
elif isinstance(mop, Character.Character):
url_firstPart = imdbURL_character_main
elif isinstance(mop, Company.Company):
url_firstPart = imdbURL_company_main
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance')
return url_firstPart % imdbID
def get_special_methods(self):
"""Return the special methods defined by the subclass."""
sm_dict = {}
base_methods = []
for name in dir(IMDbBase):
member = getattr(IMDbBase, name)
if isinstance(member, MethodType):
base_methods.append(name)
for name in dir(self.__class__):
if name.startswith('_') or name in base_methods or \
name.startswith('get_movie_') or \
name.startswith('get_person_') or \
name.startswith('get_company_') or \
name.startswith('get_character_'):
continue
member = getattr(self.__class__, name)
if isinstance(member, MethodType):
sm_dict.update({name: member.__doc__})
return sm_dict
| gpl-3.0 |
t-hey/QGIS-Original | python/plugins/processing/script/AddScriptFromFileAction.py | 6 | 3506 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2014'
# Fixed a typo in the year: '201' -> '2014' (matching __date__ above).
__copyright__ = '(C) 2014, Victor Olaya'

# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtWidgets import QFileDialog, QMessageBox
from qgis.PyQt.QtCore import QFileInfo
from qgis.core import QgsApplication, QgsSettings
from processing.script.ScriptAlgorithm import ScriptAlgorithm
from processing.gui.ToolboxAction import ToolboxAction
from processing.script.WrongScriptException import WrongScriptException
from processing.script.ScriptUtils import ScriptUtils
pluginPath = os.path.split(os.path.dirname(__file__))[0]
class AddScriptFromFileAction(ToolboxAction):
    """Toolbox action that imports user-selected .py script files into
    the Processing scripts folder and refreshes the script provider."""

    def __init__(self):
        # Translated name/group shown in the Processing toolbox.
        self.name, self.i18n_name = self.trAction('Add script from file')
        self.group, self.i18n_group = self.trAction('Tools')

    def getIcon(self):
        """Return the toolbox icon for this action."""
        return QgsApplication.getThemeIcon("/processingScript.svg")

    def execute(self):
        """Ask the user for script files, copy the valid ones into the
        scripts folder and report the invalid ones."""
        settings = QgsSettings()
        # Start the file dialog in the last used directory.
        lastDir = settings.value('Processing/lastScriptsDir', '')
        filenames, selected_filter = QFileDialog.getOpenFileNames(self.toolbox,
            self.tr('Script files', 'AddScriptFromFileAction'), lastDir,
            self.tr('Script files (*.py *.PY)', 'AddScriptFromFileAction'))
        if filenames:
            validAlgs = 0
            wrongAlgs = []
            for filename in filenames:
                try:
                    # Remember the directory for the next invocation.
                    settings.setValue('Processing/lastScriptsDir',
                                      QFileInfo(filename).absoluteDir().absolutePath())
                    # Parsing the file validates it as a script algorithm.
                    script = ScriptAlgorithm(filename)
                    destFilename = os.path.join(ScriptUtils.scriptsFolders()[0], os.path.basename(filename))
                    with open(destFilename, 'w') as f:
                        f.write(script.script)
                    validAlgs += 1
                except WrongScriptException:
                    wrongAlgs.append(os.path.basename(filename))
            if validAlgs:
                # At least one script was copied: reload the provider.
                QgsApplication.processingRegistry().providerById('script').refreshAlgorithms()
            if wrongAlgs:
                QMessageBox.warning(self.toolbox,
                                    self.tr('Error reading scripts', 'AddScriptFromFileAction'),
                                    self.tr('The following files do not contain a valid script:\n-', 'AddScriptFromFileAction') +
                                    "\n-".join(wrongAlgs))
| gpl-2.0 |
rikirenz/inspire-next | inspirehep/modules/records/json_ref_loader.py | 2 | 4458 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Resource-aware json reference loaders to be used with jsonref."""
from __future__ import absolute_import, division, print_function
import re
from flask import current_app
from jsonref import JsonLoader, JsonRef
from werkzeug.urls import url_parse
import jsonresolver
from jsonresolver.contrib.jsonref import json_loader_factory
from inspirehep.modules.pidstore.utils import get_pid_type_from_endpoint
from inspirehep.utils import record_getter
class AbstractRecordLoader(JsonLoader):
    """Base for resource-aware record loaders.
    Resolves the refered resource by the given uri by first checking against
    local resources.
    """
    def get_record(self, pid_type, recid):
        # Hook for subclasses: return the record dict or None.
        raise NotImplementedError()
    def get_remote_json(self, uri, **kwargs):
        """Resolve ``uri`` locally when it points at this server, otherwise
        fall back to the parent class's HTTP resolution.

        Returns the record dict, or None when the URI path is malformed or
        the record cannot be fetched.
        """
        parsed_uri = url_parse(uri)
        # Add http:// protocol so uri.netloc is correctly parsed.
        server_name = current_app.config.get('SERVER_NAME')
        if not re.match('^https?://', server_name):
            server_name = 'http://{}'.format(server_name)
        parsed_server = url_parse(server_name)
        if parsed_uri.netloc and parsed_uri.netloc != parsed_server.netloc:
            # Foreign host: defer to jsonref's standard HTTP loader.
            return super(AbstractRecordLoader, self).get_remote_json(uri,
                                                                     **kwargs)
        # Expect a path like ".../<endpoint>/<recid>".
        path_parts = parsed_uri.path.strip('/').split('/')
        if len(path_parts) < 2:
            current_app.logger.error('Bad JSONref URI: {0}'.format(uri))
            return None
        endpoint = path_parts[-2]
        pid_type = get_pid_type_from_endpoint(endpoint)
        recid = path_parts[-1]
        res = self.get_record(pid_type, recid)
        return res
class ESJsonLoader(AbstractRecordLoader):
    """Resolve resources by retrieving them from Elasticsearch."""
    def get_record(self, pid_type, recid):
        """Fetch the record from the search index; None when unavailable."""
        try:
            record = record_getter.get_es_record(pid_type, recid)
        except record_getter.RecordGetterError:
            record = None
        return record
class DatabaseJsonLoader(AbstractRecordLoader):
    """Resolve resources by retrieving them from the database."""
    def get_record(self, pid_type, recid):
        """Fetch the record from the database; None when unavailable."""
        try:
            record = record_getter.get_db_record(pid_type, recid)
        except record_getter.RecordGetterError:
            record = None
        return record
# Module-level loader singletons used by ``replace_refs`` below.
es_record_loader = ESJsonLoader()
db_record_loader = DatabaseJsonLoader()
# Loader class handed to invenio-jsonschemas for relative $ref resolution.
SCHEMA_LOADER_CLS = json_loader_factory(
    jsonresolver.JSONResolver(
        plugins=['invenio_jsonschemas.jsonresolver']
    )
)
"""Used in invenio-jsonschemas to resolve relative $ref."""
def replace_refs(obj, source='db'):
    """Replaces record refs in obj by bypassing HTTP requests.
    Any reference URI that comes from the same server and references a resource
    will be resolved directly either from the database or from Elasticsearch.
    :param obj:
        Dict-like object for which '$ref' fields are recursively replaced.
    :param source:
        Name of the source from which to resolve the references. It can be
        any of:
        * 'db' - resolve from Database
        * 'es' - resolve from Elasticsearch
        * 'http' - force using HTTP
    :raises ValueError: if ``source`` is not one of the supported names.
    :returns:
        The same obj structure with the '$ref' fields replaced with the object
        available at the given URI.
    """
    loaders = {
        'db': db_record_loader,
        'es': es_record_loader,
        'http': None
    }
    if source not in loaders:
        # Join the names explicitly: formatting ``loaders.keys()`` directly
        # would leak the unhelpful ``dict_keys([...])`` repr into the message.
        raise ValueError(
            'source must be one of {}'.format(', '.join(sorted(loaders))))
    # A None loader makes JsonRef fall back to its default HTTP resolution.
    loader = loaders[source]
    return JsonRef.replace_refs(obj, loader=loader, load_on_repr=False)
| gpl-3.0 |
shinglyu/moztrap | tests/view/results/test_finders.py | 5 | 1115 | """
Tests for results finder.
"""
from django.core.urlresolvers import reverse
from tests import case
class CaseColumnTest(case.DBTestCase):
    """Tests for results finder CaseColumn."""
    @property
    def column(self):
        """The Column class under test."""
        # Imported lazily so Django is fully set up before the module loads.
        from moztrap.view.results.finders import CaseColumn
        return CaseColumn
    def test_goto_url(self):
        """goto_url returns results list url for given RCV."""
        # NOTE(review): the first two positional args are not exercised by
        # goto_url here — confirm their meaning against CaseColumn.__init__.
        c = self.column(
            None,
            None,
            self.model.RunCaseVersion.objects.all(),
            "results_results",
        )
        rcv = self.F.RunCaseVersionFactory.create()
        url = c.goto_url(rcv)
        # The URL must reverse to the results view keyed by the RCV id.
        self.assertEqual(
            url, reverse("results_results", kwargs={"rcv_id": rcv.id}))
    def test_no_goto_url(self):
        """goto_url still returns None if no url name given."""
        c = self.column(
            None,
            None,
            self.model.RunCaseVersion.objects.all(),
        )
        rcv = self.F.RunCaseVersionFactory.create()
        self.assertIsNone(c.goto_url(rcv))
| bsd-2-clause |
Outernet-Project/librarian | tests/data/meta/test_archive.py | 1 | 19576 | import types
import mock
import pytest
import librarian.data.meta.archive as mod
# UNIT TESTS
# _analyze() publishes ENTRY_POINT_FOUND for entry-point files and returns
# the MetaWrapper-wrapped processor output keyed by path.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'MetaWrapper')
@mock.patch.object(mod.Archive.Processor, 'is_entry_point')
def test__analyze(is_entry_point, MetaWrapper, exts):
    is_entry_point.return_value = True
    expected = {'/path/to/file': MetaWrapper.return_value}
    # trigger processor updates
    archive = mod.Archive()
    assert archive._analyze('/path/to/file', True) == expected
    # the dict passed to save should contain the data from all processors
    proc_cls = mod.Processor.for_type('generic')
    exts.events.publish.assert_called_once_with(mod.Archive.ENTRY_POINT_FOUND,
                                                path='/path/to/file',
                                                content_type=1,
                                                processor=proc_cls)
    data = {'content_types': 1,
            'path': '/path/to/file',
            'mime_type': None,
            'metadata': {u'': {}}}
    MetaWrapper.assert_called_once_with(data)
# analyze() with a single path and no callback delegates to _analyze()
# synchronously and returns its result.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_analyze')
def test_analyze_blocking(_analyze, exts):
    archive = mod.Archive()
    _analyze.return_value = {'path': 'metadata'}
    assert archive.analyze('path') == _analyze.return_value
    _analyze.assert_called_once_with('path', False)
# analyze() with a callback schedules a background task and returns an
# empty dict immediately, without calling _analyze() inline.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_analyze')
def test_analyze_nonblocking_call(_analyze, exts):
    archive = mod.Archive()
    callback = mock.Mock()
    assert archive.analyze(['path1', 'path2'], callback=callback) == {}
    assert not _analyze.called
    assert exts.tasks.schedule.called
# When the scheduled task does run (simulated by the schedule side effect),
# the callback receives the merged per-path results.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_analyze')
def test_analyze_nonblocking_result(_analyze, exts):
    _analyze.side_effect = lambda x, p: {x: 'meta'}
    exts.tasks.schedule.side_effect = lambda x: x()
    archive = mod.Archive()
    callback = mock.Mock()
    assert archive.analyze(['path1', 'path2'], callback=callback) == {}
    callback.assert_called_once_with({'path1': 'meta', 'path2': 'meta'})
# _scan() yields nothing (and analyzes nothing) when fsal fails to list
# the directory.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'analyze')
def test__scan_list_dir_fail(analyze, exts):
    exts.fsal.list_dir.return_value = (False, [], [])
    archive = mod.Archive()
    assert list(archive._scan('path', False, None, None, 0, 0)) == []
    assert not analyze.called
# _scan() recurses one directory level per maxdepth increment.
@pytest.mark.parametrize('maxdepth,levels', [
    (0, 1),
    (1, 2),
])
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'analyze')
def test__scan_maxdepth(analyze, exts, maxdepth, levels):
    exts.fsal.list_dir.return_value = (True, [mock.Mock(rel_path='dir1')], [])
    archive = mod.Archive()
    expected = [analyze.return_value] * levels
    assert list(archive._scan('path', False, None, maxdepth, 0, 0)) == expected
    assert exts.fsal.list_dir.call_count == levels
# With a callback, _scan() reports results through it (yielding nothing)
# and schedules subdirectory scans as separate delayed tasks.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'analyze')
def test__scan_callback(analyze, exts):
    callback = mock.Mock()
    exts.fsal.list_dir.return_value = (True, [mock.Mock(rel_path='dir1')], [])
    archive = mod.Archive()
    assert list(archive._scan('path', False, callback, 1, 0, 1)) == []
    callback.assert_called_once_with(analyze.return_value)
    kwargs = dict(path='dir1',
                  partial=False,
                  callback=callback,
                  maxdepth=1,
                  depth=1,
                  delay=1)
    exts.tasks.schedule.assert_called_once_with(archive.scan,
                                                kwargs=kwargs,
                                                delay=1)
# Without a callback, _scan() behaves as a lazy generator.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'analyze')
def test__scan_generator(analyze, exts):
    exts.fsal.list_dir.return_value = (True, [mock.Mock(rel_path='dir1')], [])
    archive = mod.Archive()
    # due to the above set up mock, it will yield infinitely so test only
    # a couple cases
    generator = archive._scan('path', False, None, None, 0, 0)
    assert next(generator) == analyze.return_value
    assert next(generator) == analyze.return_value
    assert next(generator) == analyze.return_value
    assert analyze.call_count == 3
# scan() returns a generator when no callback is given, a list otherwise.
@pytest.mark.parametrize('callback,ret_type', [
    (None, types.GeneratorType),
    (lambda x: x, list),
])
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'analyze')
def test_scan_return_value(analyze, exts, callback, ret_type):
    exts.fsal.list_dir.return_value = (True, [], [])
    archive = mod.Archive()
    ret = archive.scan(callback=callback)
    assert isinstance(ret, ret_type)
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Processor, 'for_type')
def test__keep_supported(for_type, exts):
    # set up mocked processors that just write something in the dict
    proc = mock.Mock()
    proc.return_value.can_process.side_effect = lambda x: x.endswith('txt')
    for_type.return_value = proc
    # check if paths were filtered according to processability
    archive = mod.Archive()
    ret = archive._keep_supported(['f1.txt', 'f3.jpg', 'file4.txt'], 'text')
    # NOTE(review): 'f3.jpg' survives even though the mocked can_process
    # only accepts *.txt — confirm whether _keep_supported is meant to
    # filter on can_process at all.
    assert ret == ['f1.txt', 'f3.jpg', 'file4.txt']
# _strip() keeps only the keys valid for the given content type.
@pytest.mark.parametrize('src,expected,content_type', [
    ({'invalid': 2}, {}, 'generic',),
    ({'width': 1, 'test': 2, 'height': 3}, {'width': 1, 'height': 3}, 'image'),
])
@mock.patch.object(mod, 'exts')
def test__strip(exts, src, expected, content_type):
    archive = mod.Archive()
    assert archive._strip(src, content_type) == expected
# parent() returns the cached metadata without refreshing when get() finds it.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_refresh_parent')
@mock.patch.object(mod.Archive, 'get')
def test_parent_found(get, _refresh_parent, exts):
    get.return_value = {'path': 'meta'}
    archive = mod.Archive()
    assert archive.parent('path') == 'meta'
    get.assert_called_once_with('path', ignore_missing=True)
    assert not _refresh_parent.called
# parent() falls back to _refresh_parent() when get() yields nothing.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_refresh_parent')
@mock.patch.object(mod.Archive, 'get')
def test_parent_not_found(get, _refresh_parent, exts):
    get.return_value = {}
    archive = mod.Archive()
    assert archive.parent('path') == _refresh_parent.return_value
    get.assert_called_once_with('path', ignore_missing=True)
    _refresh_parent.assert_called_once_with('path')
# Passing refresh= skips the cache lookup entirely and forwards the source.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_refresh_parent')
@mock.patch.object(mod.Archive, 'get')
def test_parent_force_refresh(get, _refresh_parent, exts):
    archive = mod.Archive()
    archive.parent('path', refresh='from source')
    assert not get.called
    _refresh_parent.assert_called_once_with('path', 'from source')
# INTEGRATION TESTS FOR DATABASE QUERIES
def merge_fs_with_meta(fs_data, metadata, content_type=None):
    """Yield copies of ``fs_data`` entries with their matching ``metadata``
    rows folded into a nested ``metadata[language][key]`` mapping.

    When ``content_type`` (a bitmask) is given, entries that do not carry
    every bit of it are skipped entirely.
    """
    for entry in fs_data:
        if content_type is not None:
            # keep only entries whose bitmask contains all wanted bits
            if entry['content_types'] & content_type != content_type:
                continue
        # work on a copy so the caller's fixtures stay untouched
        merged = dict(entry)
        matching = (m for m in metadata if m['fs_id'] == merged['id'])
        for meta in matching:
            lang_map = merged.setdefault('metadata', {})
            per_lang = lang_map.setdefault(meta['language'], {})
            per_lang[meta['key']] = unicode(meta['value'])
        yield merged
def compare_result_sets(result, expected):
    """Assert that ``result`` (path -> wrapper) matches ``expected``
    (path -> plain dict) in size and in every listed key/value pair."""
    assert len(result) == len(expected)
    for path, data in expected.items():
        unwrapped = result[path].unwrap()
        for key in data:
            assert unwrapped[key] == data[key]
# for_parent() returns all children of the given path, merged with metadata.
@mock.patch.object(mod, 'exts')
def test_for_parent(exts, populated_database):
    (fs_data, metadata, databases) = populated_database
    archive = mod.Archive(db=databases.librarian)
    # test for path only
    iter_merged = merge_fs_with_meta(fs_data, metadata)
    expected = dict((item['path'], item) for item in iter_merged
                    if item['parent_id'] == fs_data[0]['id'])
    result = archive.for_parent(fs_data[0]['path'])
    # compare results against expected data
    compare_result_sets(result, expected)
# for_parent() with content_type= restricts children to that type's bitmask.
@mock.patch.object(mod, 'exts')
def test_for_parent_content_type(exts, populated_database):
    (fs_data, metadata, databases) = populated_database
    archive = mod.Archive(db=databases.librarian)
    # test for path with specific content type filtering involved
    content_types = mod.ContentTypes.from_bitmask(fs_data[0]['content_types'])
    for ctype in content_types:
        # filter expected data as the query should perform too
        bitmask = mod.ContentTypes.to_bitmask(ctype)
        iter_merged = merge_fs_with_meta(fs_data, metadata, bitmask)
        expected = dict((item['path'], item) for item in iter_merged
                        if item['parent_id'] == fs_data[0]['id'])
        result = archive.for_parent(fs_data[0]['path'], content_type=ctype)
        # compare results against expected filtered data
        compare_result_sets(result, expected)
# _attach_missing() is a no-op when every requested path is already present.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'save_many')
@mock.patch.object(mod.Archive, 'analyze')
def test__attach_missing_none(analyze, save_many, exts):
    archive = mod.Archive()
    data = {'path': 'exists'}
    ret = archive._attach_missing(['path'], data, False)
    assert ret == data
    assert not analyze.called
# In partial mode, missing paths get a quick partial analysis for the
# return value plus a scheduled full analysis that saves via save_many.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'analyze')
def test__attach_missing_partial(analyze, exts):
    archive = mod.Archive()
    # only one path will be requested in this test, so it's safe to return
    # only one entry
    analyze.side_effect = lambda paths, **kw: {list(paths)[0]: 'found'}
    data = {'path': 'exists'}
    ret = archive._attach_missing(['path', 'missing'], data, True)
    assert ret == {'path': 'exists', 'missing': 'found'}
    paths = set(['missing'])
    analyze.assert_has_calls([
        mock.call(paths, callback=archive.save_many),
        mock.call(paths, partial=True),
    ])
# In non-partial mode, missing paths are analyzed once and saved directly.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'save_many')
@mock.patch.object(mod.Archive, 'analyze')
def test__attach_missing_impartial(analyze, save_many, exts):
    archive = mod.Archive()
    # only one path will be requested in this test, so it's safe to return
    # only one entry
    analyze.side_effect = lambda paths, **kw: {list(paths)[0]: 'found'}
    data = {'path': 'exists'}
    ret = archive._attach_missing(['path', 'missing'], data, False)
    assert ret == {'path': 'exists', 'missing': 'found'}
    analyze.assert_called_once_with(set(['missing']))
    save_many.assert_called_once_with({'missing': 'found'})
# get() short-circuits to an empty result when no path supports the
# requested content type, touching the database not at all.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_keep_supported')
def test_get_none_supported(_keep_supported, exts):
    db = exts.databases[mod.Archive.DATABASE_NAME]
    _keep_supported.return_value = []
    archive = mod.Archive()
    assert archive.get(['path1', 'path2'], content_type='some') == {}
    # make sure no db operations were executed at all
    assert not db.Select.called
# With ignore_missing=True, unknown paths are simply left out.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_attach_missing')
@mock.patch.object(mod.Archive, '_keep_supported')
def test_get_ignore_missing(_keep_supported, _attach_missing, exts):
    db = exts.databases[mod.Archive.DATABASE_NAME]
    db.fetchiter.return_value = ()
    paths = ['/path/invalid', '/another/invalid']
    archive = mod.Archive()
    assert archive.get(paths, ignore_missing=True) == {}
    assert not _attach_missing.called
# With ignore_missing=False, unknown paths are forwarded to _attach_missing.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_attach_missing')
@mock.patch.object(mod.Archive, '_keep_supported')
def test_get_attach_missing(_keep_supported, _attach_missing, exts,
                            strip_wrappers):
    db = exts.databases[mod.Archive.DATABASE_NAME]
    db.fetchiter.return_value = ()
    paths = ['/path/invalid', '/another/invalid']
    archive = mod.Archive()
    # use unwrapped version because ``batched`` would turn the result
    # into a dict
    unwrapped = strip_wrappers(archive.get)
    ret = unwrapped(archive, paths, ignore_missing=False)
    _attach_missing.assert_called_once_with(paths, {}, True)
    assert ret == _attach_missing.return_value
# End-to-end get() against a populated database, filtered by content type.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, '_keep_supported')
def test_get(_keep_supported, exts, populated_database):
    # keep supported just returns what it gets
    _keep_supported.side_effect = lambda x, y: x
    (fs_data, metadata, databases) = populated_database
    archive = mod.Archive(db=databases.librarian)
    # pick the last existing content type from the entries in db
    found_types = mod.ContentTypes.from_bitmask(fs_data[-1]['content_types'])
    bitmask = mod.ContentTypes.to_bitmask(found_types[-1])
    # filter expected data as the query should perform too
    expected = dict((item['path'], item)
                    for item in merge_fs_with_meta(fs_data, metadata, bitmask))
    paths = expected.keys()
    result = archive.get(paths,
                         content_type=found_types[-1],
                         ignore_missing=True)
    # compare results against expected filtered data
    compare_result_sets(result, expected)
# Without a source, _refresh_parent() scans the directory itself, saves the
# aggregated directory entry (child bitmask 4 | DIRECTORY bit -> 5) and
# returns the freshly fetched metadata.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'get')
@mock.patch.object(mod.Archive, 'save')
@mock.patch.object(mod.Archive, 'scan')
def test__refresh_parent_no_source(scan, save, get, exts):
    path = '/path/parent'
    scan.return_value = [{'/path/parent/child': mock.Mock(content_types=4)}]
    get.return_value = {path: 'metadata'}
    archive = mod.Archive()
    assert archive._refresh_parent(path) == 'metadata'
    scan.assert_called_once_with(path, partial=True, maxdepth=0)
    save.assert_called_once_with({'path': path,
                                  'type': mod.DIRECTORY_TYPE,
                                  'mime_type': None,
                                  'content_types': 5})
    get.assert_called_once_with(path)
# With an explicit source mapping, the scan step is skipped entirely.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'get')
@mock.patch.object(mod.Archive, 'save')
@mock.patch.object(mod.Archive, 'scan')
def test__refresh_parent_from_source(scan, save, get, exts):
    path = '/path/parent'
    source = {'/path/parent/child': mock.Mock(content_types=4)}
    get.return_value = {path: 'metadata'}
    archive = mod.Archive()
    assert archive._refresh_parent(path, source) == 'metadata'
    assert not scan.called
    save.assert_called_once_with({'path': path,
                                  'type': mod.DIRECTORY_TYPE,
                                  'mime_type': None,
                                  'content_types': 5})
    get.assert_called_once_with(path)
def pick_search_data(entries):
    """Return ``(content_type, language, first_title_word)`` taken from the
    first entry that has a titled content type and a title in its metadata,
    or ``None`` when no entry qualifies."""
    untitled = (mod.ContentTypes.GENERIC, mod.ContentTypes.DIRECTORY)
    for entry in entries:
        all_types = mod.ContentTypes.from_bitmask(entry['content_types'])
        titled = [ct for ct in all_types if ct not in untitled]
        if not titled:
            # none of the entry's content types can carry a title
            continue
        for lang, meta in entry['metadata'].items():
            if isinstance(meta, dict) and 'title' in meta:
                first_word = meta['title'].split()[0]
                return (titled[0], lang, first_word)
# search() returns entries of the given content type whose title (in the
# given language) contains the search term.
@mock.patch.object(mod, 'exts')
def test_search(exts, populated_database):
    (fs_data, metadata, databases) = populated_database
    archive = mod.Archive(db=databases.librarian)
    # pick an existing content type
    entries = list(merge_fs_with_meta(fs_data, metadata))
    # find data suitable for search
    (content_type, lang, term) = pick_search_data(entries)
    bitmask = mod.ContentTypes.to_bitmask(content_type)
    # filter expected data as the query should perform too
    expected = dict((item['path'], item) for item in entries
                    if item['content_types'] & bitmask == bitmask and
                    term in item['metadata'].get(lang, {}).get('title', ''))
    result = archive.search(term, content_type=content_type, language=lang)
    # compare results against expected filtered data
    assert len(result) == len(expected)
    assert sorted(result.keys()) == sorted(expected.keys())
# save() persists the entry and returns a wrapper; the stored bitmask always
# includes the GENERIC bit (| 1) on top of what the caller passed.
@mock.patch.object(mod, 'exts')
def test_save(exts, databases):
    mocked_cache = mock.Mock()
    # empty cache so save() cannot take a cached shortcut
    mocked_cache.get.return_value = None
    exts.cache = mocked_cache
    data = {
        'type': mod.FILE_TYPE,
        'path': '/path/to/file',
        'mime_type': 'image/jpeg',
        'content_types': mod.ContentTypes.to_bitmask(mod.ContentTypes.VIDEO),
        'metadata': {
            'en': {
                'title': 'test',
                'description': 'another',
            }
        }
    }
    archive = mod.Archive(db=databases.librarian)
    wrapper = archive.save(data)
    saved = wrapper.unwrap()
    assert saved['path'] == data['path']
    assert saved['content_types'] == data['content_types'] | 1
    assert saved['type'] == data['type']
    assert saved['metadata'] == data['metadata']
    assert saved['mime_type'] == data['mime_type']
# remove() deletes the fs rows and runs each processor's deprocess() cleanup
# for every removed path.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Processor, 'for_path')
def test_remove(for_path, exts, populated_database, processors):
    for_path.return_value = processors
    (fs_data, metadata, databases) = populated_database
    archive = mod.Archive(db=databases.librarian)
    paths = [fs_data[0]['path'], fs_data[-1]['path']]
    archive.remove(paths)
    # the removed paths must no longer be present in the fs table
    q = databases.librarian.Select(sets='fs',
                                   where=databases.librarian.sqlin('path', paths))
    assert databases.librarian.fetchall(q, paths) == []
    # make sure cleanup function was called
    calls = [mock.call(paths[0], fsal=archive._fsal),
             mock.call().deprocess(),
             mock.call(paths[1], fsal=archive._fsal),
             mock.call().deprocess()]
    for proc in processors:
        proc.assert_has_calls(calls)
# A newly found entry point replaces the parent's recorded 'main' when it
# ranks better (or when the parent has none recorded yet).
@pytest.mark.parametrize('get_meta,save_meta', [
    (
        {},
        {'__auto__': {'main': 'index.html'}}
    ), (
        {'/path/parent': mod.MetaWrapper({
            'metadata': {'__auto__': {'main': 'start.html'}}
        })},
        {'__auto__': {'main': 'index.html'}}
    )
])
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'save')
@mock.patch.object(mod.Archive, 'get')
def test__entry_point_found_better(get, save, exts, get_meta, save_meta):
    archive = mod.Archive()
    html_proc = mod.Processor.for_type('html')
    get.return_value = get_meta
    archive._entry_point_found('/path/parent/index.html', 'html', html_proc)
    get.assert_called_once_with('/path/parent', ignore_missing=True)
    expected = dict(metadata=save_meta,
                    path='/path/parent',
                    type=mod.DIRECTORY_TYPE,
                    mime_type=None,
                    content_types='html')
    save.assert_called_once_with(expected)
# A worse-ranked entry point ('main.html' vs recorded 'index.html') must not
# overwrite the parent's existing 'main'.
@mock.patch.object(mod, 'exts')
@mock.patch.object(mod.Archive, 'save')
@mock.patch.object(mod.Archive, 'get')
def test__entry_point_found_worse(get, save, exts):
    archive = mod.Archive()
    html_proc = mod.Processor.for_type('html')
    data = {'/path/parent': mod.MetaWrapper({
        'metadata': {'__auto__': {'main': 'index.html'}}
    })}
    get.return_value = data
    archive._entry_point_found('/path/parent/main.html', 'html', html_proc)
    get.assert_called_once_with('/path/parent', ignore_missing=True)
    assert not save.called
| gpl-3.0 |
dsfsdgsbngfggb/odoo | addons/account_analytic_plans/wizard/__init__.py | 445 | 1117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import analytic_plan_create_model
import account_crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NaturalGIS/naturalgis_qgis | python/plugins/processing/gui/PointSelectionPanel.py | 45 | 3329 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointSelectionPanel.py
---------------------
Date : February 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
import os
import warnings
from qgis.core import (QgsProject,
QgsReferencedPointXY,
QgsPointXY)
from qgis.PyQt import uic
from qgis.utils import iface
from processing.gui.PointMapTool import PointMapTool
pluginPath = os.path.split(os.path.dirname(__file__))[0]
# Compile the shared selector .ui file once at import time, silencing the
# DeprecationWarnings that uic compilation can emit.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    WIDGET, BASE = uic.loadUiType(
        os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class PointSelectionPanel(BASE, WIDGET):
    """Parameter panel that lets the user enter a point as ``x,y`` text
    (optionally suffixed with a CRS authid) or pick it on the map canvas."""

    def __init__(self, dialog, default=None):
        super(PointSelectionPanel, self).__init__(None)
        self.setupUi(self)
        self.btnSelect.clicked.connect(self.selectOnCanvas)
        self.dialog = dialog
        self.crs = QgsProject.instance().crs()
        if iface is not None:
            canvas = iface.mapCanvas()
            # remember the active tool so it can be restored after picking
            self.prevMapTool = canvas.mapTool()
            self.tool = PointMapTool(canvas)
            self.tool.canvasClicked.connect(self.updatePoint)
            self.tool.complete.connect(self.pointPicked)
        else:
            # no GUI available (e.g. headless/testing): picking is disabled
            self.prevMapTool = None
            self.tool = None
        if default:
            tokens = str(default).split(',')
            if len(tokens) == 2:
                try:
                    # accept the default only when both tokens are numbers;
                    # narrowed from a bare except: float(str) raises ValueError
                    float(tokens[0])
                    float(tokens[1])
                    self.leText.setText(str(default))
                except ValueError:
                    # malformed default: leave the field empty
                    pass

    def selectOnCanvas(self):
        """Activate the point map tool and minimize the parent dialog."""
        canvas = iface.mapCanvas()
        canvas.setMapTool(self.tool)
        self.dialog.showMinimized()

    def updatePoint(self, point, button):
        """Write the clicked point into the text field, appending the
        project CRS authid when it is valid."""
        s = '{},{}'.format(point.x(), point.y())
        self.crs = QgsProject.instance().crs()
        if self.crs.isValid():
            s += ' [' + self.crs.authid() + ']'
        self.leText.setText(s)

    def pointPicked(self):
        """Restore the previous map tool and bring the dialog back up."""
        canvas = iface.mapCanvas()
        canvas.setMapTool(self.prevMapTool)
        self.dialog.showNormal()
        self.dialog.raise_()
        self.dialog.activateWindow()

    def getValue(self):
        """Return the entered text, or None when the field is blank."""
        if str(self.leText.text()).strip() != '':
            return str(self.leText.text())
        else:
            return None

    def setPointFromString(self, s):
        """Set the text field content directly from a string."""
        self.leText.setText(s)
| gpl-2.0 |
taaviteska/django | tests/template_tests/syntax_tests/i18n/test_blocktrans.py | 27 | 21381 | import os
from threading import local
from django.template import Context, Template, TemplateSyntaxError
from django.test import SimpleTestCase, override_settings
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real
from ...utils import setup
from .base import MultipleLocaleActivationTestCase, extended_locale_paths, here
class I18nBlockTransTagTests(SimpleTestCase):
libraries = {'i18n': 'django.templatetags.i18n'}
@setup({'i18n03': '{% load i18n %}{% blocktrans %}{{ anton }}{% endblocktrans %}'})
def test_i18n03(self):
"""simple translation of a variable"""
output = self.engine.render_to_string('i18n03', {'anton': 'Å'})
self.assertEqual(output, 'Å')
@setup({'i18n04': '{% load i18n %}{% blocktrans with berta=anton|lower %}{{ berta }}{% endblocktrans %}'})
def test_i18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string('i18n04', {'anton': 'Å'})
self.assertEqual(output, 'å')
@setup({'legacyi18n04': '{% load i18n %}'
'{% blocktrans with anton|lower as berta %}{{ berta }}{% endblocktrans %}'})
def test_legacyi18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string('legacyi18n04', {'anton': 'Å'})
self.assertEqual(output, 'å')
@setup({'i18n05': '{% load i18n %}{% blocktrans %}xxx{{ anton }}xxx{% endblocktrans %}'})
def test_i18n05(self):
"""simple translation of a string with interpolation"""
output = self.engine.render_to_string('i18n05', {'anton': 'yyy'})
self.assertEqual(output, 'xxxyyyxxx')
@setup({'i18n07': '{% load i18n %}'
'{% blocktrans count counter=number %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_i18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string('i18n07', {'number': 1})
self.assertEqual(output, 'singular')
@setup({'legacyi18n07': '{% load i18n %}'
'{% blocktrans count number as counter %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_legacyi18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string('legacyi18n07', {'number': 1})
self.assertEqual(output, 'singular')
@setup({'i18n08': '{% load i18n %}'
'{% blocktrans count number as counter %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_i18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string('i18n08', {'number': 2})
self.assertEqual(output, '2 plural')
@setup({'legacyi18n08': '{% load i18n %}'
'{% blocktrans count counter=number %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_legacyi18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string('legacyi18n08', {'number': 2})
self.assertEqual(output, '2 plural')
@setup({'i18n17': '{% load i18n %}'
'{% blocktrans with berta=anton|escape %}{{ berta }}{% endblocktrans %}'})
def test_i18n17(self):
"""
Escaping inside blocktrans and trans works as if it was directly in the
template.
"""
output = self.engine.render_to_string('i18n17', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'i18n18': '{% load i18n %}'
'{% blocktrans with berta=anton|force_escape %}{{ berta }}{% endblocktrans %}'})
def test_i18n18(self):
output = self.engine.render_to_string('i18n18', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'i18n19': '{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}'})
def test_i18n19(self):
output = self.engine.render_to_string('i18n19', {'andrew': 'a & b'})
self.assertEqual(output, 'a & b')
@setup({'i18n21': '{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}'})
def test_i18n21(self):
output = self.engine.render_to_string('i18n21', {'andrew': mark_safe('a & b')})
self.assertEqual(output, 'a & b')
@setup({'legacyi18n17': '{% load i18n %}'
'{% blocktrans with anton|escape as berta %}{{ berta }}{% endblocktrans %}'})
def test_legacyi18n17(self):
output = self.engine.render_to_string('legacyi18n17', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'legacyi18n18': '{% load i18n %}'
'{% blocktrans with anton|force_escape as berta %}'
'{{ berta }}{% endblocktrans %}'})
def test_legacyi18n18(self):
output = self.engine.render_to_string('legacyi18n18', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'i18n26': '{% load i18n %}'
'{% blocktrans with extra_field=myextra_field count counter=number %}'
'singular {{ extra_field }}{% plural %}plural{% endblocktrans %}'})
def test_i18n26(self):
"""
translation of plural form with extra field in singular form (#13568)
"""
output = self.engine.render_to_string('i18n26', {'myextra_field': 'test', 'number': 1})
self.assertEqual(output, 'singular test')
    @setup({'legacyi18n26': '{% load i18n %}'
            '{% blocktrans with myextra_field as extra_field count number as counter %}'
            'singular {{ extra_field }}{% plural %}plural{% endblocktrans %}'})
    def test_legacyi18n26(self):
        """Same as test_i18n26 but with the legacy 'as' argument syntax."""
        output = self.engine.render_to_string('legacyi18n26', {'myextra_field': 'test', 'number': 1})
        self.assertEqual(output, 'singular test')
    @setup({'i18n27': '{% load i18n %}{% blocktrans count counter=number %}'
            '{{ counter }} result{% plural %}{{ counter }} results'
            '{% endblocktrans %}'})
    def test_i18n27(self):
        """translation of singular form in Russian (#14126)"""
        # Expected output is the Cyrillic word for "result" (escaped form).
        with translation.override('ru'):
            output = self.engine.render_to_string('i18n27', {'number': 1})
        self.assertEqual(output, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442')
    @setup({'legacyi18n27': '{% load i18n %}'
            '{% blocktrans count number as counter %}{{ counter }} result'
            '{% plural %}{{ counter }} results{% endblocktrans %}'})
    def test_legacyi18n27(self):
        """Same as test_i18n27 but with the legacy 'count ... as ...' syntax."""
        with translation.override('ru'):
            output = self.engine.render_to_string('legacyi18n27', {'number': 1})
        self.assertEqual(output, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442')
    @setup({'i18n28': '{% load i18n %}'
            '{% blocktrans with a=anton b=berta %}{{ a }} + {{ b }}{% endblocktrans %}'})
    def test_i18n28(self):
        """simple translation of multiple variables"""
        output = self.engine.render_to_string('i18n28', {'anton': 'α', 'berta': 'β'})
        self.assertEqual(output, 'α + β')
    @setup({'legacyi18n28': '{% load i18n %}'
            '{% blocktrans with anton as a and berta as b %}'
            '{{ a }} + {{ b }}{% endblocktrans %}'})
    def test_legacyi18n28(self):
        """Multiple variables with the legacy 'x as a and y as b' syntax."""
        output = self.engine.render_to_string('legacyi18n28', {'anton': 'α', 'berta': 'β'})
        self.assertEqual(output, 'α + β')
    # blocktrans handling of variables which are not in the context.
    # this should work as if blocktrans was not there (#19915)
    @setup({'i18n34': '{% load i18n %}{% blocktrans %}{{ missing }}{% endblocktrans %}'})
    def test_i18n34(self):
        """A variable absent from the context renders like any invalid variable."""
        output = self.engine.render_to_string('i18n34')
        # Both engine configurations are exercised by the test matrix.
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    @setup({'i18n34_2': '{% load i18n %}{% blocktrans with a=\'α\' %}{{ missing }}{% endblocktrans %}'})
    def test_i18n34_2(self):
        """Missing variable with an unrelated literal 'with' argument present."""
        output = self.engine.render_to_string('i18n34_2')
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    @setup({'i18n34_3': '{% load i18n %}{% blocktrans with a=anton %}{{ missing }}{% endblocktrans %}'})
    def test_i18n34_3(self):
        """Missing variable with an unrelated variable 'with' argument present."""
        output = self.engine.render_to_string(
            'i18n34_3', {'anton': '\xce\xb1'})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    @setup({'i18n37': '{% load i18n %}'
            '{% trans "Page not found" as page_not_found %}'
            '{% blocktrans %}Error: {{ page_not_found }}{% endblocktrans %}'})
    def test_i18n37(self):
        """A {% trans ... as var %} result can be interpolated in blocktrans."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n37')
        self.assertEqual(output, 'Error: Seite nicht gefunden')
    # blocktrans tag with asvar
    @setup({'i18n39': '{% load i18n %}'
            '{% blocktrans asvar page_not_found %}Page not found{% endblocktrans %}'
            '>{{ page_not_found }}<'})
    def test_i18n39(self):
        """asvar stores the translated text in a variable instead of rendering it."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n39')
        self.assertEqual(output, '>Seite nicht gefunden<')
    @setup({'i18n40': '{% load i18n %}'
            '{% trans "Page not found" as pg_404 %}'
            '{% blocktrans with page_not_found=pg_404 asvar output %}'
            'Error: {{ page_not_found }}'
            '{% endblocktrans %}'})
    def test_i18n40(self):
        """With asvar and no later use of the variable, nothing is rendered."""
        output = self.engine.render_to_string('i18n40')
        self.assertEqual(output, '')
    @setup({'i18n41': '{% load i18n %}'
            '{% trans "Page not found" as pg_404 %}'
            '{% blocktrans with page_not_found=pg_404 asvar output %}'
            'Error: {{ page_not_found }}'
            '{% endblocktrans %}'
            '>{{ output }}<'})
    def test_i18n41(self):
        """asvar combined with 'with' arguments; variable rendered afterwards."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n41')
        self.assertEqual(output, '>Error: Seite nicht gefunden<')
    @setup({'template': '{% load i18n %}{% blocktrans asvar %}Yes{% endblocktrans %}'})
    def test_blocktrans_syntax_error_missing_assignment(self):
        """asvar without a variable name is a template syntax error."""
        msg = "No argument provided to the 'blocktrans' tag for the asvar option."
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')
    @setup({'template': '{% load i18n %}{% blocktrans %}%s{% endblocktrans %}'})
    def test_blocktrans_tag_using_a_string_that_looks_like_str_fmt(self):
        """A literal '%s' survives blocktrans's internal %-interpolation."""
        output = self.engine.render_to_string('template')
        self.assertEqual(output, '%s')
class TranslationBlockTransTagTests(SimpleTestCase):
    """{% blocktrans %} tests that need real compiled translation catalogs."""
    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_template_tags_pgettext(self):
        """{% blocktrans %} takes message contexts into account (#14806)."""
        # Reset the cached translation machinery so the overridden
        # LOCALE_PATHS actually take effect for this test.
        trans_real._active = local()
        trans_real._translations = {}
        with translation.override('de'):
            # Nonexistent context
            t = Template('{% load i18n %}{% blocktrans context "nonexistent" %}May{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'May')
            # Existing context... using a literal
            t = Template('{% load i18n %}{% blocktrans context "month name" %}May{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% blocktrans context "verb" %}May{% endblocktrans %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Kann')
            # Using a variable
            t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'month name'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'verb'}))
            self.assertEqual(rendered, 'Kann')
            # Using a filter
            t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'MONTH NAME'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
            rendered = t.render(Context({'message_context': 'VERB'}))
            self.assertEqual(rendered, 'Kann')
            # Using 'count'
            t = Template(
                '{% load i18n %}{% blocktrans count number=1 context "super search" %}'
                '{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, '1 Super-Ergebnis')
            t = Template(
                '{% load i18n %}{% blocktrans count number=2 context "super search" %}{{ number }}'
                ' super result{% plural %}{{ number }} super results{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, '2 Super-Ergebnisse')
            # 'context' may also appear before 'count'.
            t = Template(
                '{% load i18n %}{% blocktrans context "other super search" count number=1 %}'
                '{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, '1 anderen Super-Ergebnis')
            t = Template(
                '{% load i18n %}{% blocktrans context "other super search" count number=2 %}'
                '{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, '2 andere Super-Ergebnisse')
            # Using 'with'
            t = Template(
                '{% load i18n %}{% blocktrans with num_comments=5 context "comment count" %}'
                'There are {{ num_comments }} comments{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Es gibt 5 Kommentare')
            t = Template(
                '{% load i18n %}{% blocktrans with num_comments=5 context "other comment count" %}'
                'There are {{ num_comments }} comments{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Andere: Es gibt 5 Kommentare')
            # Using trimmed
            t = Template(
                '{% load i18n %}{% blocktrans trimmed %}\n\nThere\n\t are 5 '
                '\n\n comments\n{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, 'There are 5 comments')
            t = Template(
                '{% load i18n %}{% blocktrans with num_comments=5 context "comment count" trimmed %}\n\n'
                'There are \t\n \t {{ num_comments }} comments\n\n{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Es gibt 5 Kommentare')
            t = Template(
                '{% load i18n %}{% blocktrans context "other super search" count number=2 trimmed %}\n'
                '{{ number }} super \n result{% plural %}{{ number }} super results{% endblocktrans %}'
            )
            rendered = t.render(Context())
            self.assertEqual(rendered, '2 andere Super-Ergebnisse')
            # Misuses
            with self.assertRaises(TemplateSyntaxError):
                Template('{% load i18n %}{% blocktrans context with month="May" %}{{ month }}{% endblocktrans %}')
            with self.assertRaises(TemplateSyntaxError):
                Template('{% load i18n %}{% blocktrans context %}{% endblocktrans %}')
            with self.assertRaises(TemplateSyntaxError):
                Template(
                    '{% load i18n %}{% blocktrans count number=2 context %}'
                    '{{ number }} super result{% plural %}{{ number }}'
                    ' super results{% endblocktrans %}'
                )
    @override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
    def test_bad_placeholder_1(self):
        """
        Error in translation file should not crash template rendering (#16516).
        (%(person)s is translated as %(personne)s in fr.po).
        """
        with translation.override('fr'):
            t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')
            rendered = t.render(Context({'person': 'James'}))
            # Falls back to the untranslated string instead of raising KeyError.
            self.assertEqual(rendered, 'My name is James.')
    @override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
    def test_bad_placeholder_2(self):
        """
        Error in translation file should not crash template rendering (#18393).
        (%(person) misses a 's' in fr.po, causing the string formatting to fail)
        .
        """
        with translation.override('fr'):
            t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')
            rendered = t.render(Context({'person': 'James'}))
            # Falls back to the untranslated string on a formatting error.
            self.assertEqual(rendered, 'My other name is James.')
class MultipleLocaleActivationBlockTransTests(MultipleLocaleActivationTestCase):
    """blocktrans must translate with the locale active at *render* time,
    not the locale that happened to be active when the template was compiled.
    """
    def test_single_locale_activation(self):
        """
        Simple baseline behavior with one locale for all the supported i18n
        constructs.
        """
        with translation.override('fr'):
            self.assertEqual(
                Template("{% load i18n %}{% blocktrans %}Yes{% endblocktrans %}").render(Context({})),
                'Oui'
            )
    def test_multiple_locale_btrans(self):
        # Compile under 'de', render under 'nl': output must be Dutch.
        with translation.override('de'):
            t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
        with translation.override(self._old_language), translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_deactivate_btrans(self):
        # Same, but the compile-time override deactivates on exit.
        with translation.override('de', deactivate=True):
            t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
    def test_multiple_locale_direct_switch_btrans(self):
        # Same, switching directly from 'de' to 'nl'.
        with translation.override('de'):
            t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
class MiscTests(SimpleTestCase):
    """Regression tests around '%' characters inside blocktrans blocks."""
    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_percent_in_translatable_block(self):
        # Literal '%' in the block must survive gettext's %-interpolation.
        t_sing = Template("{% load i18n %}{% blocktrans %}The result was {{ percent }}%{% endblocktrans %}")
        t_plur = Template(
            "{% load i18n %}{% blocktrans count num as number %}"
            "{{ percent }}% represents {{ num }} object{% plural %}"
            "{{ percent }}% represents {{ num }} objects{% endblocktrans %}"
        )
        with translation.override('de'):
            self.assertEqual(t_sing.render(Context({'percent': 42})), 'Das Ergebnis war 42%')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '42% stellt 1 Objekt dar')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '42% stellt 4 Objekte dar')
    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_percent_formatting_in_blocktrans(self):
        """
        Python's %-formatting is properly escaped in blocktrans, singular, or
        plural.
        """
        t_sing = Template("{% load i18n %}{% blocktrans %}There are %(num_comments)s comments{% endblocktrans %}")
        t_plur = Template(
            "{% load i18n %}{% blocktrans count num as number %}"
            "%(percent)s% represents {{ num }} object{% plural %}"
            "%(percent)s% represents {{ num }} objects{% endblocktrans %}"
        )
        with translation.override('de'):
            # Strings won't get translated as they don't match after escaping %
            self.assertEqual(t_sing.render(Context({'num_comments': 42})), 'There are %(num_comments)s comments')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '%(percent)s% represents 1 object')
            self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '%(percent)s% represents 4 objects')
| bsd-3-clause |
youngking/learn-you-some-erlang | html2rest.py | 1 | 11988 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2006-2011 Gerard Flanagan
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-----------------------------------------------------------------------------
__version__ = '0.2.2'
import sys
import os
import re
from sgmllib import SGMLParser
from StringIO import StringIO
from textwrap import TextWrapper
from urllib2 import urlparse
CODEBLOCK = '::'  # reST literal-block introducer emitted before <pre> content
BLOCKTAGS = ['div', 'blockquote']  # treated as generic block-level containers
IGNORETAGS = ['title', 'style', 'script']  # contents of these tags are dropped
UNDERLINES = list('=-~`+;')  # section underline characters for h1..h6
# Fredrik Lundh, http://effbot.org/zone/re-sub.html
def unescape(text, to_encoding='utf8'):
    """Replace HTML character/entity references in *text* with the
    characters they name and return the result encoded as *to_encoding*.

    Handles decimal ("&#65;") and hexadecimal ("&#x41;") character
    references as well as named entities ("&amp;").  Malformed or unknown
    references are left untouched.
    """
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # Numeric character reference (decimal or, with x/X, hex).
            try:
                if text[:3].lower() == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                # Not a valid number -- leave the reference as-is.
                pass
        else:
            # Named entity, e.g. "&amp;" -> "&".
            import htmlentitydefs
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is
    # Raw string: "\w" in a plain string is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxWarning/SyntaxError on newer Pythons).
    return re.sub(r"&#?\w+;", fixup, text).encode(to_encoding)
try:
    from BeautifulSoup import BeautifulSoup
except ImportError:
    # Fallback when BeautifulSoup is not installed: act as the identity
    # function so readsoup() degrades to a no-op cleanup pass.
    def BeautifulSoup(text, *args, **kw):
        return text
def readsoup(html, convert='html', encoding='utf8'):
    """Clean up *html* via BeautifulSoup (converting entities per *convert*)
    and return the result as a byte string.

    If BeautifulSoup is unavailable the module-level fallback makes this a
    no-op apart from the str() conversion.
    """
    soup = BeautifulSoup(html, convertEntities=convert, fromEncoding=encoding)
    return str(soup)
def html2rest(html, writer=sys.stdout, encoding='utf8', relto=None, preprocess=None):
    """Convert an HTML byte string to reStructuredText written to *writer*.

    ``relto`` (a URL) is used to resolve relative hrefs; ``preprocess`` is an
    optional callable applied to *html* (with ``encoding=``) before parsing.
    """
    relroot = None
    relpath = None
    if relto:
        parsed = urlparse.urlparse(relto)
        relroot = parsed.scheme + '://' + parsed.netloc
        relpath = relroot + parsed.path
        # Anchor for relative links must end with a slash.
        if not relpath.endswith('/'):
            relpath = relpath + '/'
    if preprocess:
        html = preprocess(html, encoding=encoding)
    rest_parser = Parser(writer, encoding, relroot, relpath)
    rest_parser.feed(html.decode(encoding))
    rest_parser.close()
class LineBuffer(object):
    """Accumulates output as a list of lines.

    ``write()`` normalises whitespace and word-wraps; ``rawwrite()`` keeps
    the caller's own line breaks verbatim.  Buffered lines can be indented
    or left-stripped in place, indexed, and read back newline-joined.
    """

    def __init__(self):
        self._lines = []
        self._wrapper = TextWrapper()

    def __len__(self):
        return len(self._lines)

    def __getitem__(self, index):
        return self._lines[index]

    def __setitem__(self, index, value):
        self._lines[index] = value

    def clear(self):
        """Discard all buffered lines."""
        del self._lines[:]

    def read(self):
        """Return the buffered text as one newline-joined string."""
        return '\n'.join(self._lines)

    def write(self, text):
        """Normalise whitespace in *text*, wrap it, and append the lines."""
        normalised = ' '.join(text.split())
        self._lines.extend(self._wrapper.wrap(normalised))

    def rawwrite(self, text):
        """Append *text* verbatim, split on its own newlines (no wrapping)."""
        self._lines.extend(text.splitlines())

    def indent(self, numspaces=4, start=0):
        """Prefix each line from index *start* onwards with *numspaces* spaces."""
        pad = ' ' * numspaces
        for i in range(start, len(self._lines)):
            self._lines[i] = pad + self._lines[i]

    def lstrip(self):
        """Strip leading whitespace from every buffered line."""
        self._lines[:] = [line.lstrip() for line in self._lines]
class Parser(SGMLParser):
    """SGML event handler that emits reStructuredText to ``writer``.

    Inline text accumulates in ``stringbuffer``; on flush it moves into
    ``linebuffer`` (which wraps/indents) and finally to ``writer``.
    ``lists`` tracks nested ul/ol bullet prefixes, ``inblock`` the block
    nesting depth (used for indentation), ``verbatim`` whether we are inside
    a <pre> literal block, and ``hrefs`` collects link targets that are
    written out when </body> is seen.
    """
    def __init__(self, writer=sys.stdout, encoding='utf8', relroot=None, relpath=None):
        SGMLParser.__init__(self)
        self.writer = writer
        self.encoding = encoding
        self.relroot = relroot
        self.relpath = relpath
        self.stringbuffer = StringIO()
        self.linebuffer = LineBuffer()
        self.verbatim = False
        self.lists = []
        self.ignoredata = False
        self.inblock = 0
        self.nobreak = False
        self.hrefs = {}
    def close(self):
        # Flush whatever is still buffered before closing the SGML parser.
        self.writeline()
        SGMLParser.close(self)
    def flush(self):
        # Move wrapped lines to the writer, indenting by block depth.
        if self.linebuffer:
            if self.inblock > 1:
                indent = 4 * (self.inblock - 1)
                self.linebuffer.indent(indent)
            self.writer.write(unescape(self.linebuffer.read(), self.encoding))
            self.linebuffer.clear()
    def flush_stringbuffer(self):
        # Append pending inline text to the last wrapped line (or start one).
        sbuf = self.stringbuffer.getvalue()
        if not sbuf:
            return
        elif self.linebuffer:
            self.linebuffer[-1] += sbuf
        else:
            self.linebuffer.write(sbuf)
        self.clear_stringbuffer()
    def clear_stringbuffer(self):
        #self.stringbuffer.reset()
        self.stringbuffer.seek(0)
        self.stringbuffer.truncate()
    def data(self, text):
        self.stringbuffer.write(text)
    def pending(self):
        # True if any text is buffered at either level.
        return self.stringbuffer.tell() or self.linebuffer
    def write(self, text=''):
        self.flush_stringbuffer()
        self.flush()
        self.writer.write(unescape(text))
    def writeline(self, text=''):
        self.write(text + '\n')
    def writestartblock(self, text=''):
        # Ensure a blank line separates the new block from earlier output.
        if self.pending():
            self.writeline()
        self.writeline()
        self.writeline(text)
    def writeendblock(self, text=''):
        self.writeline(text)
        self.writeline()
    def writeblock(self, text=''):
        self.writestartblock(text)
        self.writeline()
    def handle_data(self, data):
        if self.ignoredata:
            return
        elif self.verbatim:
            # Inside <pre>: keep the data byte-for-byte.
            self.data(data)
        else:
            # Remember the link text for the href collected in start_a().
            if '#pending' in self.hrefs:
                self.hrefs[self.hrefs['#pending']] = data
            self.data(' '.join(data.splitlines()))
    def unknown_starttag(self, tag, attrs):
        if tag in IGNORETAGS:
            self.ignoredata = True
        elif len(tag) == 2 and tag[0] == 'h':
            # h1..h6: start a section title block.
            self.writestartblock()
        elif tag == 'br':
            if self.verbatim:
                self.data('\n')
            elif not self.inblock:
                self.writeline()
            else:
                self.data(' ')
        elif not self.verbatim:
            self.data(' ')
    def unknown_endtag(self, tag):
        self.ignoredata = False
        if len(tag) == 2 and tag[0] == 'h':
            # Underline the just-finished heading with the char for its level.
            self.flush_stringbuffer()
            if self.linebuffer:
                linebuf = self.linebuffer
                linebuf[-1] = linebuf[-1].strip()
                char = UNDERLINES[int(tag[1])-1]
                linebuf.write(char * len(linebuf[-1]))
                self.writeline()
        #elif tag in BLOCKTAGS and self.pending():
        #    if self.lists:
        #        self.end_li()
        #    else:
        #        self.writeline()
        elif not self.verbatim:
            self.data(' ')
    def start_a(self, attrs):
        href = dict(attrs).get('href', None)
        if not href or href.startswith('#'):
            return
        elif self.relroot and self.relpath and 'mailto:' not in href:
            # Resolve relative hrefs against the configured base URL.
            if href.startswith('/'):
                href = self.relroot + href
            elif '://' not in href:
                href = self.relpath + href
        self.data('`')
        self.hrefs['#pending'] = href
    def end_a(self):
        if '#pending' in self.hrefs:
            self.data('`_')
            del self.hrefs['#pending']
    def start_img(self, attrs):
        attrs = dict(attrs)
        alt = attrs.get('alt', None)
        self.writeline()
        self.writeline()
        self.writeline(".. image:: %s" % attrs['src'])
        if alt:
            self.writeline("   :alt: %s" % alt)
        self.writeline()
    def end_img(self):
        self.writeline()
    def start_pre(self, attrs):
        if self.lists:
            self.end_li()
            self.writeline()
        #self.inblock += 1
        self.verbatim = True
        self.writeblock(CODEBLOCK)
    def end_pre(self):
        # Literal text bypasses wrapping: indent it raw under the '::'.
        sbuf = self.stringbuffer.getvalue()
        if sbuf:
            self.linebuffer.rawwrite(sbuf)
            self.linebuffer.indent(4)
            self.clear_stringbuffer()
        self.writeendblock()
        #self.inblock -= 1
        self.verbatim = False
    def start_ul(self, attrs):
        if self.lists:
            self.end_li()
            self.writeline()
        else:
            self.writeline()
        self.lists.append('+ ')
        self.inblock += 1
    def end_ul(self):
        self.end_li()
        self.lists.pop()
        self.inblock -= 1
        if self.inblock:
            self.writeline()
        else:
            self.writeendblock()
    def start_ol(self, attrs):
        if self.lists:
            self.end_li()
            self.writeline()
        else:
            self.writeline()
        self.lists.append('#. ')
        self.inblock += 1
    def end_ol(self):
        self.end_li()
        self.lists.pop()
        self.inblock -= 1
        if self.inblock:
            self.writeline()
        else:
            self.writeendblock()
    def start_p(self, attrs):
        if self.verbatim:
            self.writeline()
        elif not self.inblock:
            self.writeline()
    def end_p(self):
        if self.inblock:
            #self.flush_stringbuffer()
            if self.verbatim:
                self.writeline()
            else:
                return
        else:
            self.linebuffer.lstrip()
            self.writeline()
    def start_li(self, attrs):
        self.writeline()
        self.data(self.lists[-1])
    def end_li(self):
        self.flush_stringbuffer()
        linebuf = self.linebuffer
        if linebuf and linebuf[0] and linebuf[0].lstrip()[:2] in ['+ ', '#.']:
            start=1
        else:
            # the start of the <li> has already been written, perhaps because
            # there was a <pre> block
            start = 0
        self.linebuffer.indent(len(self.lists[-1]), start=start)
        self.write()
    def start_dl(self, attrs):
        self.writeline()
        self.inblock += 1
        self.nobreak = True
    def end_dl(self):
        self.nobreak = False
        self.writeline()
        self.inblock -= 1
    def start_dt(self, attrs):
        # Definition term rendered as a reST field-list-style ':term:' line.
        self.data(':')
    def end_dt(self):
        self.data(':')
    def start_dd(self, attrs):
        self.data(' ')
    def end_dd(self):
        self.flush_stringbuffer()
        self.linebuffer.indent(2, start=1)
        self.writeline()
    def start_em(self, attrs):
        self.data(' *')
    def end_em(self):
        self.data('*')
    def start_b(self, attrs):
        self.data(' **')
    def end_b(self):
        self.data('**')
    def start_code(self, attrs):
        self.data(' ``')
    def end_code(self):
        self.data('``')
    def start_span(self, attrs):
        pass
    def end_span(self):
        pass
    def start_body(self, attrs):
        pass
    def end_body(self):
        # Emit all collected hyperlink targets at the end of the document.
        self.end_p()
        for href, link in self.hrefs.items():
            if href[0] != '#':
                self.writeline('.. _%s: %s' % (link, href))
        self.end_p()
| mit |
stasic/debian-unbound | pythonmod/doc/conf.py | 20 | 5787 | # -*- coding: utf-8 -*-
#
# Unbound scripting interface documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# NOTE: points two levels up so autodoc can import the project package.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'../..')))
#print sys.path
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Unbound scriptable interface'
copyright = '2009, Zdenek Vasicek, Marek Vavrusa'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'unbound_interface'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
  ('index', 'Unbound_interface.tex', 'Unbound scriptable interface',
   'Zdenek Vasicek, Marek Vavrusa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bsd-3-clause |
vikatory/kbengine | kbe/src/lib/python/Lib/lib2to3/pgen2/driver.py | 89 | 5153 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import io
import os
import logging
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
    """High-level front end for the pgen2 parser.

    Wraps a Grammar and (optionally) a tree-conversion callback, and offers
    convenience entry points for parsing token streams, open streams, files,
    and strings into a syntax tree.
    """
    def __init__(self, grammar, convert=None, logger=None):
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert
    def parse_tokens(self, tokens, debug=False):
        """Parse a series of tokens and return the syntax tree."""
        # XXX Move the prefix computation into a wrapper around tokenize.
        p = parse.Parser(self.grammar, self.convert)
        p.setup()
        # (lineno, column) track our position; any text between the previous
        # token's end and the next token's start is accumulated as 'prefix'
        # (whitespace/comments) and attached to the next significant token.
        lineno = 1
        column = 0
        type = value = start = end = line_text = None
        prefix = ""
        for quintuple in tokens:
            type, value, start, end, line_text = quintuple
            if start != (lineno, column):
                assert (lineno, column) <= start, ((lineno, column), start)
                s_lineno, s_column = start
                if lineno < s_lineno:
                    prefix += "\n" * (s_lineno - lineno)
                    lineno = s_lineno
                    column = 0
                if column < s_column:
                    prefix += line_text[column:s_column]
                    column = s_column
            if type in (tokenize.COMMENT, tokenize.NL):
                # Comments and non-logical newlines become part of the prefix.
                prefix += value
                lineno, column = end
                if value.endswith("\n"):
                    lineno += 1
                    column = 0
                continue
            if type == token.OP:
                # Map operator text to its specific grammar token type.
                type = grammar.opmap[value]
            if debug:
                self.logger.debug("%s %r (prefix=%r)",
                                  token.tok_name[type], value, prefix)
            if p.addtoken(type, value, (prefix, start)):
                if debug:
                    self.logger.debug("Stop.")
                break
            prefix = ""
            lineno, column = end
            if value.endswith("\n"):
                lineno += 1
                column = 0
        else:
            # We never broke out -- EOF is too soon (how can this happen???)
            raise parse.ParseError("incomplete input",
                                   type, value, (prefix, start))
        return p.rootnode
    def parse_stream_raw(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        tokens = tokenize.generate_tokens(stream.readline)
        return self.parse_tokens(tokens, debug)
    def parse_stream(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        return self.parse_stream_raw(stream, debug)
    def parse_file(self, filename, encoding=None, debug=False):
        """Parse a file and return the syntax tree."""
        stream = codecs.open(filename, "r", encoding)
        try:
            return self.parse_stream(stream, debug)
        finally:
            stream.close()
    def parse_string(self, text, debug=False):
        """Parse a string and return the syntax tree."""
        tokens = tokenize.generate_tokens(io.StringIO(text).readline)
        return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
                 save=True, force=False, logger=None):
    """Load the grammar (maybe from a pickle).

    Regenerates the grammar tables from the text file *gt* when *force* is
    true or the pickle *gp* is stale; otherwise loads the cached pickle.
    A failed pickle write is logged but not fatal.
    """
    if logger is None:
        logger = logging.getLogger()
    if gp is None:
        # Default pickle name: <base><ext-without-.txt><python-version>.pickle
        base, ext = os.path.splitext(gt)
        if ext == ".txt":
            ext = ""
        version_tag = ".".join(map(str, sys.version_info))
        gp = "%s%s%s.pickle" % (base, ext, version_tag)
    if not force and _newer(gp, gt):
        # Cached tables are up to date -- just load them.
        g = grammar.Grammar()
        g.load(gp)
        return g
    logger.info("Generating grammar tables from %s", gt)
    g = pgen.generate_grammar(gt)
    if save:
        logger.info("Writing grammar tables to %s", gp)
        try:
            g.dump(gp)
        except OSError as e:
            # Non-fatal: next run will simply regenerate the tables.
            logger.info("Writing failed:" + str(e))
    return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
def main(*args):
    """Main program, when run as a script: produce grammar pickle files.

    Calls load_grammar for each argument, a path to a grammar text file.
    Returns True (mapped to exit status 0 by the __main__ guard below).
    """
    grammar_files = args or sys.argv[1:]
    logging.basicConfig(level=logging.INFO, stream=sys.stdout,
                        format='%(message)s')
    for grammar_file in grammar_files:
        load_grammar(grammar_file, save=True, force=True)
    return True
if __name__ == "__main__":
    sys.exit(int(not main()))
| lgpl-3.0 |
stephen-bunn/sandpaper | tests/rules/_common.py | 1 | 3598 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2017 Stephen Bunn (stephen@bunn.io)
# MIT License <https://opensource.org/licenses/MIT>
import os
import abc
import glob
import filecmp
import unittest
import sandpaper
import six
import braceexpand
class BaseRuleTest(six.with_metaclass(abc.ABCMeta, unittest.TestCase)):
    """ The base rule test.

    Subclasses declare a rule (name, arguments, group) and inherit tests
    that check the rule's existence, its registration on a SandPaper
    instance, and its application against static pre/post fixture files.
    """

    @abc.abstractproperty
    def rule_name(self):
        """ Required rule name for static path discovery.
        """
        raise NotImplementedError()

    @abc.abstractproperty
    def rule_arguments(self):
        """ Required rule arguments as a tuple (*args, **kwargs).
        """
        raise NotImplementedError()

    @abc.abstractproperty
    def rule_group(self):
        """ Required rule group for static path discovery.
        """
        raise NotImplementedError()

    @property
    def static_dir(self):
        """ The path to the rule's static testing files (cached).
        """
        if not hasattr(self, '_static_dir'):
            self._static_dir = os.path.abspath(os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                ('static/rules/{self.rule_name}').format(self=self)
            ))
        return self._static_dir

    def _evaluate_glob(self, pattern):
        """ Expand a brace pattern and glob every variation.

        Returns the set of all matching paths.
        """
        discovered = set()
        for variation in braceexpand.braceexpand(pattern):
            for path in glob.glob(variation):
                discovered.add(path)
        return discovered

    def _get_static_glob(self, post=False):
        """ Build the brace-expandable glob for pre/post fixture files.
        """
        return os.path.join(
            self.static_dir,
            '{flag}.{{xls{{,x}},{{c,t}}sv}}'
        ).format(flag=('post' if post else 'pre'))

    def setUp(self):
        """ Setup the test.
        """
        self.paper = sandpaper.SandPaper()

    def tearDown(self):
        """ Clear the execution of the test.
        """
        del self.paper

    def test_exists(self):
        """ Test that the rule exists.
        """
        self.assertTrue(callable(getattr(self.paper, self.rule_name)))

    def test_addition(self):
        """ Test the addition of the rule.
        """
        self.assertIsInstance(getattr(self.paper, 'rules'), list)
        self.assertEqual(len(getattr(self.paper, 'rules')), 0)
        self.assertIsInstance(getattr(self.paper, self.rule_group), set)
        self.assertEqual(getattr(self.paper, self.rule_group), set())
        getattr(self.paper, self.rule_name)()
        self.assertGreater(len(getattr(self.paper, 'rules')), 0)
        self.assertIsInstance(getattr(self.paper, self.rule_group), set)
        self.assertGreater(len(getattr(self.paper, self.rule_group)), 0)
        del self.paper.rules[:]

    def test_application(self):
        """ Tests the implementation of the rule.
        """
        getattr(self.paper, self.rule_name)(
            *self.rule_arguments[0],
            **self.rule_arguments[-1]
        )
        # BUG FIX: ``_evaluate_glob`` returns an unordered set; zipping two
        # sets could pair a "pre" fixture with the wrong "post" fixture.
        # Sorting both lists pairs files of matching extensions reliably.
        pre_paths = sorted(self._evaluate_glob(self._get_static_glob(post=False)))
        post_paths = sorted(self._evaluate_glob(self._get_static_glob(post=True)))
        sanded_paths = []
        for path in pre_paths:
            (name, ext,) = os.path.splitext(path)
            sanded_paths.append(('{name}.sanded{ext}').format(**locals()))
        for (from_file, to_file, result_file,) in \
                zip(pre_paths, sanded_paths, post_paths):
            self.paper.apply(from_file, to_file)
            self.assertTrue(filecmp.cmp(to_file, result_file))
| mit |
JFriel/honours_project | venv/lib/python2.7/site-packages/nltk/corpus/reader/plaintext.py | 7 | 9289 | # Natural Language Toolkit: Plaintext Corpus Reader
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# Nitin Madnani <nmadnani@umiacs.umd.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A reader for corpora that consist of plaintext documents.
"""
import codecs
import nltk.data
from nltk.compat import string_types
from nltk.tokenize import *
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class PlaintextCorpusReader(CorpusReader):
    """
    Reader for corpora that consist of plaintext documents.  Paragraphs
    are assumed to be split using blank lines.  Sentences and words can
    be tokenized using the default tokenizers, or by custom tokenizers
    specified as parameters to the constructor.

    This corpus reader can be customized (e.g., to skip preface
    sections of specific document formats) by creating a subclass and
    overriding the ``CorpusView`` class variable.
    """

    CorpusView = StreamBackedCorpusView
    """The corpus view class used by this reader.  Subclasses of
       ``PlaintextCorpusReader`` may specify alternative corpus view
       classes (e.g., to skip the preface sections of documents.)"""

    def __init__(self, root, fileids,
                 word_tokenizer=WordPunctTokenizer(),
                 sent_tokenizer=nltk.data.LazyLoader(
                     'tokenizers/punkt/english.pickle'),
                 para_block_reader=read_blankline_block,
                 encoding='utf8'):
        """
        Construct a new plaintext corpus reader for a set of documents
        located at the given root directory.  Example usage:

            >>> root = '/usr/local/share/nltk_data/corpora/webtext/'
            >>> reader = PlaintextCorpusReader(root, '.*\.txt') # doctest: +SKIP

        :param root: The root directory for this corpus.
        :param fileids: A list or regexp specifying the fileids in this corpus.
        :param word_tokenizer: Tokenizer for breaking sentences or
            paragraphs into words.
        :param sent_tokenizer: Tokenizer for breaking paragraphs
            into sentences.
        :param para_block_reader: The block reader used to divide the
            corpus into paragraph blocks.
        """
        CorpusReader.__init__(self, root, fileids, encoding)
        self._word_tokenizer = word_tokenizer
        self._sent_tokenizer = sent_tokenizer
        self._para_block_reader = para_block_reader

    def raw(self, fileids=None):
        """
        :return: the given file(s) as a single string.
        :rtype: str
        """
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, string_types):
            fileids = [fileids]
        raw_texts = []
        for f in fileids:
            _fin = self.open(f)
            try:
                # Close the stream even when read() raises, so a decoding
                # error part-way through the list does not leak handles.
                raw_texts.append(_fin.read())
            finally:
                _fin.close()
        return concat(raw_texts)

    def words(self, fileids=None):
        """
        :return: the given file(s) as a list of words
            and punctuation symbols.
        :rtype: list(str)
        """
        return concat([self.CorpusView(path, self._read_word_block, encoding=enc)
                       for (path, enc, fileid)
                       in self.abspaths(fileids, True, True)])

    def sents(self, fileids=None):
        """
        :return: the given file(s) as a list of
            sentences or utterances, each encoded as a list of word
            strings.
        :rtype: list(list(str))
        """
        if self._sent_tokenizer is None:
            raise ValueError('No sentence tokenizer for this corpus')
        return concat([self.CorpusView(path, self._read_sent_block, encoding=enc)
                       for (path, enc, fileid)
                       in self.abspaths(fileids, True, True)])

    def paras(self, fileids=None):
        """
        :return: the given file(s) as a list of
            paragraphs, each encoded as a list of sentences, which are
            in turn encoded as lists of word strings.
        :rtype: list(list(list(str)))
        """
        if self._sent_tokenizer is None:
            raise ValueError('No sentence tokenizer for this corpus')
        return concat([self.CorpusView(path, self._read_para_block, encoding=enc)
                       for (path, enc, fileid)
                       in self.abspaths(fileids, True, True)])

    def _read_word_block(self, stream):
        words = []
        for i in range(20):  # Read 20 lines at a time.
            words.extend(self._word_tokenizer.tokenize(stream.readline()))
        return words

    def _read_sent_block(self, stream):
        sents = []
        for para in self._para_block_reader(stream):
            sents.extend([self._word_tokenizer.tokenize(sent)
                          for sent in self._sent_tokenizer.tokenize(para)])
        return sents

    def _read_para_block(self, stream):
        paras = []
        for para in self._para_block_reader(stream):
            paras.append([self._word_tokenizer.tokenize(sent)
                          for sent in self._sent_tokenizer.tokenize(para)])
        return paras
class CategorizedPlaintextCorpusReader(CategorizedCorpusReader,
                                       PlaintextCorpusReader):
    """
    A reader for plaintext corpora whose documents are divided into
    categories based on their file identifiers.
    """
    def __init__(self, *args, **kwargs):
        """
        Initialize the corpus reader.  Categorization arguments
        (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
        the ``CategorizedCorpusReader`` constructor.  The remaining arguments
        are passed to the ``PlaintextCorpusReader`` constructor.
        """
        CategorizedCorpusReader.__init__(self, kwargs)
        PlaintextCorpusReader.__init__(self, *args, **kwargs)

    def _resolve(self, fileids, categories):
        # At most one of the two selectors may be supplied.
        if fileids is not None and categories is not None:
            raise ValueError('Specify fileids or categories, not both')
        if categories is None:
            return fileids
        return self.fileids(categories)

    def raw(self, fileids=None, categories=None):
        selected = self._resolve(fileids, categories)
        return PlaintextCorpusReader.raw(self, selected)

    def words(self, fileids=None, categories=None):
        selected = self._resolve(fileids, categories)
        return PlaintextCorpusReader.words(self, selected)

    def sents(self, fileids=None, categories=None):
        selected = self._resolve(fileids, categories)
        return PlaintextCorpusReader.sents(self, selected)

    def paras(self, fileids=None, categories=None):
        selected = self._resolve(fileids, categories)
        return PlaintextCorpusReader.paras(self, selected)
# is there a better way?
class PortugueseCategorizedPlaintextCorpusReader(CategorizedPlaintextCorpusReader):
    """A categorized plaintext reader hard-wired to the Portuguese Punkt
    sentence tokenizer.

    NOTE(review): a ``sent_tokenizer`` keyword supplied by the caller is
    silently overwritten below -- presumably intentional, but confirm.
    """
    def __init__(self, *args, **kwargs):
        CategorizedCorpusReader.__init__(self, kwargs)
        # Force the Portuguese sentence tokenizer regardless of kwargs.
        kwargs['sent_tokenizer'] = nltk.data.LazyLoader('tokenizers/punkt/portuguese.pickle')
        PlaintextCorpusReader.__init__(self, *args, **kwargs)
class EuroparlCorpusReader(PlaintextCorpusReader):
    """
    Reader for Europarl corpora that consist of plaintext documents.
    Documents are divided into chapters instead of paragraphs as
    for regular plaintext documents.  Chapters are separated using blank
    lines.  Everything is inherited from ``PlaintextCorpusReader`` except
    that:
      - Since the corpus is pre-processed and pre-tokenized, the
        word tokenizer should just split the line at whitespaces.
      - For the same reason, the sentence tokenizer should just
        split the paragraph at line breaks.
      - There is a new 'chapters()' method that returns chapters
        instead of paragraphs.
      - The 'paras()' method inherited from PlaintextCorpusReader is
        made non-functional to remove any confusion between chapters
        and paragraphs for Europarl.
    """
    def _read_word_block(self, stream):
        # Pre-tokenized corpus: words are simply whitespace-separated.
        words = []
        for i in range(20):  # Read 20 lines at a time.
            words.extend(stream.readline().split())
        return words

    def _read_sent_block(self, stream):
        # Each line within a block is one pre-tokenized sentence.
        sents = []
        for para in self._para_block_reader(stream):
            sents.extend([sent.split() for sent in para.splitlines()])
        return sents

    def _read_para_block(self, stream):
        # "Paragraph" blocks are really chapters for Europarl.
        paras = []
        for para in self._para_block_reader(stream):
            paras.append([sent.split() for sent in para.splitlines()])
        return paras

    def chapters(self, fileids=None):
        """
        :return: the given file(s) as a list of
            chapters, each encoded as a list of sentences, which are
            in turn encoded as lists of word strings.
        :rtype: list(list(list(str)))
        """
        return concat([self.CorpusView(fileid, self._read_para_block,
                                       encoding=enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])

    def paras(self, fileids=None):
        raise NotImplementedError('The Europarl corpus reader does not support paragraphs. Please use chapters() instead.')
| gpl-3.0 |
adobe/chromium | third_party/mesa/MesaLib/src/gallium/auxiliary/util/u_format_pack.py | 32 | 24506 | #!/usr/bin/env python
'''
/**************************************************************************
*
* Copyright 2009-2010 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* Pixel format packing and unpacking functions.
*
* @author Jose Fonseca <jfonseca@vmware.com>
*/
'''
from u_format_parse import *
def generate_format_type(format):
    '''Generate a structure that describes the format.

    Emits (via print, Python 2 style) a C union named after the format:
    a whole-pixel ``value`` member when the block is a machine word, plus
    a ``chan`` struct with one member per channel.  Bitfields are used
    whenever any channel is not a whole power-of-two number of bytes.
    '''
    assert format.layout == PLAIN
    print 'union util_format_%s {' % format.short_name()
    if format.block_size() in (8, 16, 32, 64):
        print ' uint%u_t value;' % (format.block_size(),)
    # A single oddly-sized channel forces the whole struct into bitfields.
    use_bitfields = False
    for channel in format.channels:
        if channel.size % 8 or not is_pot(channel.size):
            use_bitfields = True
    print ' struct {'
    for channel in format.channels:
        if use_bitfields:
            if channel.type == VOID:
                if channel.size:
                    print ' unsigned %s:%u;' % (channel.name, channel.size)
            elif channel.type == UNSIGNED:
                print ' unsigned %s:%u;' % (channel.name, channel.size)
            elif channel.type in (SIGNED, FIXED):
                print ' int %s:%u;' % (channel.name, channel.size)
            elif channel.type == FLOAT:
                if channel.size == 64:
                    print ' double %s;' % (channel.name)
                elif channel.size == 32:
                    print ' float %s;' % (channel.name)
                else:
                    # Half floats have no C type; store the raw bits.
                    print ' unsigned %s:%u;' % (channel.name, channel.size)
            else:
                assert 0
        else:
            assert channel.size % 8 == 0 and is_pot(channel.size)
            if channel.type == VOID:
                if channel.size:
                    print ' uint%u_t %s;' % (channel.size, channel.name)
            elif channel.type == UNSIGNED:
                print ' uint%u_t %s;' % (channel.size, channel.name)
            elif channel.type in (SIGNED, FIXED):
                print ' int%u_t %s;' % (channel.size, channel.name)
            elif channel.type == FLOAT:
                if channel.size == 64:
                    print ' double %s;' % (channel.name)
                elif channel.size == 32:
                    print ' float %s;' % (channel.name)
                elif channel.size == 16:
                    # Half floats have no C type; store the raw bits.
                    print ' uint16_t %s;' % (channel.name)
                else:
                    assert 0
            else:
                assert 0
    print ' } chan;'
    print '};'
    print
def bswap_format(format):
    '''Emit a byte-swap of the whole pixel value on big-endian targets.

    Only needed for bitmask (arithmetically packed) formats wider than
    one byte; array formats are laid out per-channel in memory order.
    (The previous docstring was a copy-paste from generate_format_type.)
    '''
    if format.is_bitmask() and not format.is_array() and format.block_size() > 8:
        print '#ifdef PIPE_ARCH_BIG_ENDIAN'
        print ' pixel.value = util_bswap%u(pixel.value);' % format.block_size()
        print '#endif'
def is_format_supported(format):
    '''Determine whether we have the plumbing necessary to generate code
    that reads/writes this format.'''
    # FIXME: Ideally we would support any format combination here.
    if format.layout != PLAIN:
        return False
    channels = [format.channels[i] for i in range(4)]
    if any(c.type not in (VOID, UNSIGNED, SIGNED, FLOAT, FIXED) for c in channels):
        return False
    # Half, single and double are the only float widths we can convert.
    return all(c.size in (16, 32, 64) for c in channels if c.type == FLOAT)
def native_type(format):
    '''Get the native C type appropriate for a format.'''
    if format.layout == PLAIN:
        if not format.is_array():
            # For arithmetic pixel formats return the integer type that matches the whole pixel
            return 'uint%u_t' % format.block_size()
        else:
            # For array pixel formats return the integer type that matches the color channel
            channel = format.channels[0]
            if channel.type in (UNSIGNED, VOID):
                return 'uint%u_t' % channel.size
            elif channel.type in (SIGNED, FIXED):
                return 'int%u_t' % channel.size
            elif channel.type == FLOAT:
                if channel.size == 16:
                    # Half floats have no C type; carry the raw bits.
                    return 'uint16_t'
                elif channel.size == 32:
                    return 'float'
                elif channel.size == 64:
                    return 'double'
                else:
                    assert False
            else:
                assert False
    else:
        # Only PLAIN layouts are handled by the generated code paths.
        assert False
def intermediate_native_type(bits, sign):
    '''Find a native type adequate to hold intermediate results of the
    requested bit size.

    Returns the name of a C (u)intN_t type whose width N is the smallest
    power-of-two number of bits that is >= 32 and >= *bits*.

    (Renamed the local from ``bytes``, which shadowed the builtin.)
    '''
    num_bytes = 4  # don't use anything smaller than 32 bits
    while num_bytes * 8 < bits:
        num_bytes *= 2
    width = num_bytes * 8
    if sign:
        return 'int%u_t' % width
    else:
        return 'uint%u_t' % width
def get_one_shift(type):
    '''Get the number of the bit that matches unity for this type.

    Meaningless for float channels, which is rejected with an assertion.
    '''
    # BUG FIX: the original compared against the string literal 'FLOAT'
    # instead of the FLOAT channel-type constant, so the guard could never
    # fire and float channels silently fell through to the cases below.
    if type.type == FLOAT:
        assert False
    if not type.norm:
        return 0
    if type.type == UNSIGNED:
        return type.size
    if type.type == SIGNED:
        return type.size - 1
    if type.type == FIXED:
        # Explicit floor division: identical to '/' under Python 2 for
        # ints, and correct (no float) under Python 3.
        return type.size // 2
    assert False
def value_to_native(type, value):
    '''Get the native numeric representation of *value* for this channel type
    (e.g. unity maps to the maximum normalized integer).'''
    if type.type == FLOAT:
        return value
    if type.type == FIXED:
        # Fixed point: scale by 2**(size/2).  Floor division keeps the
        # shift count an int under Python 3 (identical under Python 2).
        return int(value * (1 << (type.size // 2)))
    if not type.norm:
        return int(value)
    if type.type == UNSIGNED:
        return int(value * ((1 << type.size) - 1))
    if type.type == SIGNED:
        return int(value * ((1 << (type.size - 1)) - 1))
    assert False
def native_to_constant(type, value):
    '''Render *value* as a C constant literal for this channel type.'''
    if type.type == FLOAT:
        # NOTE(review): both branches are identical; the >32-bit (double)
        # case presumably should omit the 'f' suffix ("%f" % value) --
        # confirm against the generated C before changing the output.
        if type.size <= 32:
            return "%ff" % value
        else:
            return "%ff" % value
    else:
        return str(int(value))
def get_one(type):
    '''Get the native value of unity (1.0) for this channel type.'''
    return value_to_native(type, 1)
def clamp_expr(src_channel, dst_channel, dst_native_type, value):
    '''Build a C expression that clamps *value* (expressed in the source
    channel's representation) into the destination channel's range.

    Returns *value* unchanged when the source range already fits.
    '''
    if src_channel == dst_channel:
        return value
    # Which side(s) of the destination range can the source exceed?
    need_lower = src_channel.min() < dst_channel.min()
    need_upper = src_channel.max() > dst_channel.max()
    # Express the destination bounds in the source channel's native scale.
    lower = value_to_native(src_channel, dst_channel.min())
    upper = value_to_native(src_channel, dst_channel.max())
    if need_lower and need_upper:
        return 'CLAMP(%s, %s, %s)' % (value, lower, upper)
    elif need_upper:
        return 'MIN2(%s, %s)' % (value, upper)
    elif need_lower:
        return 'MAX2(%s, %s)' % (value, lower)
    return value
def conversion_expr(src_channel,
                    dst_channel, dst_native_type,
                    value,
                    clamp=True,
                    src_colorspace = RGB,
                    dst_colorspace = RGB):
    '''Generate the expression to convert a value between two types.

    *value* is itself a C expression (a string) in the source channel's
    representation; the returned string is a C expression yielding the
    destination channel's representation, optionally clamped to range.
    '''
    # sRGB <-> linear conversions are delegated to dedicated helpers and,
    # per the asserts, only supported for 8-bit unorm <-> float.
    if src_colorspace != dst_colorspace:
        if src_colorspace == SRGB:
            assert src_channel.type == UNSIGNED
            assert src_channel.norm
            assert src_channel.size == 8
            assert dst_colorspace == RGB
            if dst_channel.type == FLOAT:
                return 'util_format_srgb_8unorm_to_linear_float(%s)' % value
            else:
                assert dst_channel.type == UNSIGNED
                assert dst_channel.norm
                assert dst_channel.size == 8
                return 'util_format_srgb_to_linear_8unorm(%s)' % value
        elif dst_colorspace == SRGB:
            assert dst_channel.type == UNSIGNED
            assert dst_channel.norm
            assert dst_channel.size == 8
            assert src_colorspace == RGB
            if src_channel.type == FLOAT:
                return 'util_format_linear_float_to_srgb_8unorm(%s)' % value
            else:
                assert src_channel.type == UNSIGNED
                assert src_channel.norm
                assert src_channel.size == 8
                return 'util_format_linear_to_srgb_8unorm(%s)' % value
        elif src_colorspace == ZS:
            pass
        elif dst_colorspace == ZS:
            pass
        else:
            assert 0
    if src_channel == dst_channel:
        return value
    src_type = src_channel.type
    src_size = src_channel.size
    src_norm = src_channel.norm
    # Promote half to float
    if src_type == FLOAT and src_size == 16:
        value = 'util_half_to_float(%s)' % value
        src_size = 32
    # Special case for float <-> ubytes for more accurate results
    # Done before clamping since these functions already take care of that
    if src_type == UNSIGNED and src_norm and src_size == 8 and dst_channel.type == FLOAT and dst_channel.size == 32:
        return 'ubyte_to_float(%s)' % value
    if src_type == FLOAT and src_size == 32 and dst_channel.type == UNSIGNED and dst_channel.norm and dst_channel.size == 8:
        return 'float_to_ubyte(%s)' % value
    if clamp:
        # float -> float never needs clamping; everything else might.
        if dst_channel.type != FLOAT or src_type != FLOAT:
            value = clamp_expr(src_channel, dst_channel, dst_native_type, value)
    # Pure integer <-> integer conversions.
    if src_type in (SIGNED, UNSIGNED) and dst_channel.type in (SIGNED, UNSIGNED):
        if not src_norm and not dst_channel.norm:
            # neither is normalized -- just cast
            return '(%s)%s' % (dst_native_type, value)
        src_one = get_one(src_channel)
        dst_one = get_one(dst_channel)
        if src_one > dst_one and src_norm and dst_channel.norm:
            # We can just bitshift
            src_shift = get_one_shift(src_channel)
            dst_shift = get_one_shift(dst_channel)
            value = '(%s >> %s)' % (value, src_shift - dst_shift)
        else:
            # We need to rescale using an intermediate type big enough to hold the multiplication of both
            tmp_native_type = intermediate_native_type(src_size + dst_channel.size, src_channel.sign and dst_channel.sign)
            value = '((%s)%s)' % (tmp_native_type, value)
            value = '(%s * 0x%x / 0x%x)' % (value, dst_one, src_one)
        value = '(%s)%s' % (dst_native_type, value)
        return value
    # Promote to either float or double
    if src_type != FLOAT:
        if src_norm or src_type == FIXED:
            one = get_one(src_channel)
            if src_size <= 23:
                value = '(%s * (1.0f/0x%x))' % (value, one)
                if dst_channel.size <= 32:
                    value = '(float)%s' % value
                src_size = 32
            else:
                # bigger than single precision mantissa, use double
                value = '(%s * (1.0/0x%x))' % (value, one)
                src_size = 64
            src_norm = False
        else:
            if src_size <= 23 or dst_channel.size <= 32:
                value = '(float)%s' % value
                src_size = 32
            else:
                # bigger than single precision mantissa, use double
                value = '(double)%s' % value
                src_size = 64
        src_type = FLOAT
    # Convert double or float to non-float
    if dst_channel.type != FLOAT:
        if dst_channel.norm or dst_channel.type == FIXED:
            dst_one = get_one(dst_channel)
            if dst_channel.size <= 23:
                value = '(%s * 0x%x)' % (value, dst_one)
            else:
                # bigger than single precision mantissa, use double
                value = '(%s * (double)0x%x)' % (value, dst_one)
        value = '(%s)%s' % (dst_native_type, value)
    else:
        # Cast double to float when converting to either half or float
        if dst_channel.size <= 32 and src_size > 32:
            value = '(float)%s' % value
            src_size = 32
        if dst_channel.size == 16:
            value = 'util_float_to_half(%s)' % value
        elif dst_channel.size == 64 and src_size < 64:
            value = '(double)%s' % value
    return value
def generate_unpack_kernel(format, dst_channel, dst_native_type):
    '''Emit the C statements that unpack one pixel from ``src`` into the
    four-element ``dst`` array (rgba order), converting every channel to
    *dst_channel*'s representation.'''
    if not is_format_supported(format):
        return
    assert format.layout == PLAIN
    src_native_type = native_type(format)
    if format.is_bitmask():
        # Arithmetically packed: read the whole pixel as one integer and
        # shift/mask the individual channels out of it.
        depth = format.block_size()
        print ' uint%u_t value = *(const uint%u_t *)src;' % (depth, depth)
        # Declare the intermediate variables
        for i in range(format.nr_channels()):
            src_channel = format.channels[i]
            if src_channel.type == UNSIGNED:
                print ' uint%u_t %s;' % (depth, src_channel.name)
            elif src_channel.type == SIGNED:
                print ' int%u_t %s;' % (depth, src_channel.name)
        if depth > 8:
            print '#ifdef PIPE_ARCH_BIG_ENDIAN'
            print ' value = util_bswap%u(value);' % depth
            print '#endif'
        # Compute the intermediate unshifted values
        shift = 0
        for i in range(format.nr_channels()):
            src_channel = format.channels[i]
            value = 'value'
            if src_channel.type == UNSIGNED:
                if shift:
                    value = '%s >> %u' % (value, shift)
                if shift + src_channel.size < depth:
                    value = '(%s) & 0x%x' % (value, (1 << src_channel.size) - 1)
            elif src_channel.type == SIGNED:
                if shift + src_channel.size < depth:
                    # Align the sign bit
                    lshift = depth - (shift + src_channel.size)
                    value = '%s << %u' % (value, lshift)
                # Cast to signed
                value = '(int%u_t)(%s) ' % (depth, value)
                if src_channel.size < depth:
                    # Align the LSB bit (arithmetic shift sign-extends)
                    rshift = depth - src_channel.size
                    value = '(%s) >> %u' % (value, rshift)
            else:
                value = None
            if value is not None:
                print ' %s = %s;' % (src_channel.name, value)
            shift += src_channel.size
        # Convert, swizzle, and store final values
        for i in range(4):
            swizzle = format.swizzles[i]
            if swizzle < 4:
                src_channel = format.channels[swizzle]
                src_colorspace = format.colorspace
                if src_colorspace == SRGB and i == 3:
                    # Alpha channel is linear
                    src_colorspace = RGB
                value = src_channel.name
                value = conversion_expr(src_channel,
                                        dst_channel, dst_native_type,
                                        value,
                                        src_colorspace = src_colorspace)
            elif swizzle == SWIZZLE_0:
                value = '0'
            elif swizzle == SWIZZLE_1:
                value = get_one(dst_channel)
            elif swizzle == SWIZZLE_NONE:
                value = '0'
            else:
                assert False
            print ' dst[%u] = %s; /* %s */' % (i, value, 'rgba'[i])
    else:
        # Array layout: overlay the union type and read channels by name.
        print ' union util_format_%s pixel;' % format.short_name()
        print ' memcpy(&pixel, src, sizeof pixel);'
        bswap_format(format)
        for i in range(4):
            swizzle = format.swizzles[i]
            if swizzle < 4:
                src_channel = format.channels[swizzle]
                src_colorspace = format.colorspace
                if src_colorspace == SRGB and i == 3:
                    # Alpha channel is linear
                    src_colorspace = RGB
                value = 'pixel.chan.%s' % src_channel.name
                value = conversion_expr(src_channel,
                                        dst_channel, dst_native_type,
                                        value,
                                        src_colorspace = src_colorspace)
            elif swizzle == SWIZZLE_0:
                value = '0'
            elif swizzle == SWIZZLE_1:
                value = get_one(dst_channel)
            elif swizzle == SWIZZLE_NONE:
                value = '0'
            else:
                assert False
            print ' dst[%u] = %s; /* %s */' % (i, value, 'rgba'[i])
def generate_pack_kernel(format, src_channel, src_native_type):
    '''Emit the C statements that pack the four-element ``src`` array
    (rgba order, in *src_channel*'s representation) into one pixel at
    ``dst``.'''
    if not is_format_supported(format):
        return
    dst_native_type = native_type(format)
    assert format.layout == PLAIN
    inv_swizzle = format.inv_swizzles()
    if format.is_bitmask():
        # Arithmetically packed: OR each shifted channel into one integer.
        depth = format.block_size()
        print ' uint%u_t value = 0;' % depth
        shift = 0
        for i in range(4):
            dst_channel = format.channels[i]
            if inv_swizzle[i] is not None:
                value ='src[%u]' % inv_swizzle[i]
                dst_colorspace = format.colorspace
                if dst_colorspace == SRGB and inv_swizzle[i] == 3:
                    # Alpha channel is linear
                    dst_colorspace = RGB
                value = conversion_expr(src_channel,
                                        dst_channel, dst_native_type,
                                        value,
                                        dst_colorspace = dst_colorspace)
                if dst_channel.type in (UNSIGNED, SIGNED):
                    if shift + dst_channel.size < depth:
                        value = '(%s) & 0x%x' % (value, (1 << dst_channel.size) - 1)
                    if shift:
                        value = '(%s) << %u' % (value, shift)
                    if dst_channel.type == SIGNED:
                        # Cast to unsigned
                        value = '(uint%u_t)(%s) ' % (depth, value)
                else:
                    value = None
                if value is not None:
                    print ' value |= %s;' % (value)
            shift += dst_channel.size
        if depth > 8:
            print '#ifdef PIPE_ARCH_BIG_ENDIAN'
            print ' value = util_bswap%u(value);' % depth
            print '#endif'
        print ' *(uint%u_t *)dst = value;' % depth
    else:
        # Array layout: fill the union's channel members by name.
        print ' union util_format_%s pixel;' % format.short_name()
        for i in range(4):
            dst_channel = format.channels[i]
            width = dst_channel.size
            if inv_swizzle[i] is None:
                continue
            dst_colorspace = format.colorspace
            if dst_colorspace == SRGB and inv_swizzle[i] == 3:
                # Alpha channel is linear
                dst_colorspace = RGB
            value ='src[%u]' % inv_swizzle[i]
            value = conversion_expr(src_channel,
                                    dst_channel, dst_native_type,
                                    value,
                                    dst_colorspace = dst_colorspace)
            print ' pixel.chan.%s = %s;' % (dst_channel.name, value)
        bswap_format(format)
        print ' memcpy(dst, &pixel, sizeof pixel);'
def generate_format_unpack(format, dst_channel, dst_native_type, dst_suffix):
    '''Generate the function to unpack pixels from a particular format.

    Emits the function shell even for unsupported formats (with an empty
    body), so the symbol always exists.
    '''
    name = format.short_name()
    print 'static INLINE void'
    print 'util_format_%s_unpack_%s(%s *dst_row, unsigned dst_stride, const uint8_t *src_row, unsigned src_stride, unsigned width, unsigned height)' % (name, dst_suffix, dst_native_type)
    print '{'
    if is_format_supported(format):
        print ' unsigned x, y;'
        print ' for(y = 0; y < height; y += %u) {' % (format.block_height,)
        print ' %s *dst = dst_row;' % (dst_native_type)
        print ' const uint8_t *src = src_row;'
        print ' for(x = 0; x < width; x += %u) {' % (format.block_width,)
        generate_unpack_kernel(format, dst_channel, dst_native_type)
        # NOTE(review): integer division under Python 2; block_size() is
        # presumably a multiple of 8 -- under Python 3 this needs '//'.
        print ' src += %u;' % (format.block_size() / 8,)
        print ' dst += 4;'
        print ' }'
        print ' src_row += src_stride;'
        print ' dst_row += dst_stride/sizeof(*dst_row);'
        print ' }'
    print '}'
    print
def generate_format_pack(format, src_channel, src_native_type, src_suffix):
    '''Generate the function to pack pixels to a particular format.

    Emits the function shell even for unsupported formats (with an empty
    body), so the symbol always exists.
    '''
    name = format.short_name()
    print 'static INLINE void'
    print 'util_format_%s_pack_%s(uint8_t *dst_row, unsigned dst_stride, const %s *src_row, unsigned src_stride, unsigned width, unsigned height)' % (name, src_suffix, src_native_type)
    print '{'
    if is_format_supported(format):
        print ' unsigned x, y;'
        print ' for(y = 0; y < height; y += %u) {' % (format.block_height,)
        print ' const %s *src = src_row;' % (src_native_type)
        print ' uint8_t *dst = dst_row;'
        print ' for(x = 0; x < width; x += %u) {' % (format.block_width,)
        generate_pack_kernel(format, src_channel, src_native_type)
        print ' src += 4;'
        # NOTE(review): integer division under Python 2; block_size() is
        # presumably a multiple of 8 -- under Python 3 this needs '//'.
        print ' dst += %u;' % (format.block_size() / 8,)
        print ' }'
        print ' dst_row += dst_stride;'
        print ' src_row += src_stride/sizeof(*src_row);'
        print ' }'
    print '}'
    print
def generate_format_fetch(format, dst_channel, dst_native_type, dst_suffix):
    '''Generate the function to fetch a single pixel from a particular
    format (the previous docstring wrongly said "unpack").'''
    name = format.short_name()
    print 'static INLINE void'
    print 'util_format_%s_fetch_%s(%s *dst, const uint8_t *src, unsigned i, unsigned j)' % (name, dst_suffix, dst_native_type)
    print '{'
    if is_format_supported(format):
        generate_unpack_kernel(format, dst_channel, dst_native_type)
    print '}'
    print
def is_format_hand_written(format):
    '''True for layouts/colorspaces whose pack/unpack code is maintained
    by hand instead of being generated here.'''
    hand_written_layouts = ('s3tc', 'subsampled', 'other')
    return format.layout in hand_written_layouts or format.colorspace == ZS
def generate(formats):
    '''Emit the whole generated C source: headers, per-format union types,
    and float/8unorm unpack/pack/fetch functions for every format that is
    not maintained by hand.'''
    print
    print '#include "pipe/p_compiler.h"'
    print '#include "u_math.h"'
    print '#include "u_half.h"'
    print '#include "u_format.h"'
    print '#include "u_format_other.h"'
    print '#include "u_format_srgb.h"'
    print '#include "u_format_yuv.h"'
    print '#include "u_format_zs.h"'
    print
    for format in formats:
        if not is_format_hand_written(format):
            if is_format_supported(format):
                generate_format_type(format)
            # The unpack/pack/fetch generators below emit empty function
            # bodies for unsupported formats, so they run unconditionally.
            channel = Channel(FLOAT, False, 32)
            native_type = 'float'
            suffix = 'rgba_float'
            generate_format_unpack(format, channel, native_type, suffix)
            generate_format_pack(format, channel, native_type, suffix)
            generate_format_fetch(format, channel, native_type, suffix)
            channel = Channel(UNSIGNED, True, 8)
            native_type = 'uint8_t'
            suffix = 'rgba_8unorm'
            generate_format_unpack(format, channel, native_type, suffix)
            generate_format_pack(format, channel, native_type, suffix)
| bsd-3-clause |
agoose77/hivesystem | bee/segments/weaver.py | 1 | 2642 | from .. import types
from ._runtime_segment import runtime_weaver
class weaver(object):
    """Build-time segment that bundles several pull inputs into one tuple
    output of the declared type, materialized at build() time as a
    runtime_weaver subclass."""

    # Set by connect(); None until the weaver is wired into a hive.
    identifier = None

    def __init__(self, type, *inputs):
        # Normalize the declared tuple type; one element per input.
        self.type = types.typeclass(type).get_tuple()
        if len(self.type) != len(inputs):
            raise TypeError("Weaver is declared as a tuple of %d inputs, but was initialized with only %d" % (
                len(self.type), len(inputs)))
        self.inputs = []
        for inp in inputs:
            self.inputs.append(types.connection_outputclass(inp))
        self._connection_output = []
        # Type-check already-bound inputs now; defer the rest to bind().
        self.bound = True
        for inp, typ in zip(self.inputs, self.type):
            refetyp = types.mode_type("pull", typ)
            # NOTE(review): '== True' rather than truthiness -- presumably
            # deliberate if .bound can hold non-bool values; confirm.
            if inp.bound == True:
                inptyp = inp.value.connection_output_type()
                inptyp = types.mode_type(*inptyp)
                if inptyp != refetyp:
                    raise TypeError("Weaver input type should be (%s, %s), is (%s, %s)" % \
                        (refetyp.mode.value, refetyp.type.value, inptyp.mode.value, inptyp.type.value))
            else:
                self.bound = False

    def typetest(self):
        # Re-check every input against the declared "pull" tuple type.
        for inp, typ in zip(self.inputs, self.type):
            refetyp = types.mode_type("pull", typ)
            inptyp = inp.value.connection_output_type()
            inptyp = types.mode_type(*inptyp)
            if inptyp != refetyp:
                raise TypeError("Weaver input type should be (%s, %s), is (%s, %s)" % \
                    (refetyp.mode.value, refetyp.type.value, inptyp.mode.value, inptyp.type.value))

    def bind(self, classname, dic):
        # Bind any still-unbound inputs, then validate the whole tuple.
        if not self.bound:
            for inp in self.inputs:
                if not inp.bound: inp.bind(classname, dic)
            self.bound = True
            self.typetest()

    def connection_output_type(self):
        # A weaver always presents itself as a pull output of its tuple type.
        return "pull", self.type

    def connection_output(self, connection):
        self._connection_output.append(connection)

    def connect(self, identifier):
        # Lightweight stand-in carrying only the identifier each input
        # should report its output to ("weaver-<id>:<1-based index>").
        class weaver_proxy:
            def __init__(self, identifier):
                self.identifier = identifier
        self.identifier = "weaver-" + identifier
        for inr, inp in enumerate(self.inputs):
            inp.value.connection_output(weaver_proxy(self.identifier + ":" + str(inr + 1)))

    def build(self, segmentname):
        # Manufacture a runtime_weaver subclass carrying this weaver's wiring.
        dic = {
            "segmentname": segmentname,
            "identifier": self.identifier,
            "_inputs": self.inputs,
            "connection_output": self._connection_output,
        }
        return type("runtime_weaver:" + segmentname, (runtime_weaver,), dic)
| bsd-2-clause |
akintoey/django | tests/template_tests/filter_tests/test_truncatewords_html.py | 386 | 1607 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import truncatewords_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
    """Unit tests for the ``truncatewords_html`` template filter."""

    # Shared fixture with nested tags: truncation must re-close <a> and <p>.
    nested_html = '<p>one <a href="#">two - three <br>four</a> five</p>'

    def test_truncate_zero(self):
        self.assertEqual(truncatewords_html(self.nested_html, 0), '')

    def test_truncate(self):
        self.assertEqual(
            truncatewords_html(self.nested_html, 2),
            '<p>one <a href="#">two ...</a></p>',
        )

    def test_truncate2(self):
        self.assertEqual(
            truncatewords_html(self.nested_html, 4),
            '<p>one <a href="#">two - three <br>four ...</a></p>',
        )

    def test_truncate3(self):
        # Limit equal to the word count: input comes back unchanged.
        self.assertEqual(truncatewords_html(self.nested_html, 5), self.nested_html)

    def test_truncate4(self):
        # Limit above the word count: input comes back unchanged.
        self.assertEqual(truncatewords_html(self.nested_html, 100), self.nested_html)

    def test_truncate_unicode(self):
        self.assertEqual(truncatewords_html('\xc5ngstr\xf6m was here', 1), '\xc5ngstr\xf6m ...')

    def test_truncate_complex(self):
        self.assertEqual(
            truncatewords_html('<i>Buenos días! ¿Cómo está?</i>', 3),
            '<i>Buenos días! ¿Cómo ...</i>',
        )
| bsd-3-clause |
AlanZatarain/django-filebrowser | filebrowser/decorators.py | 4 | 2693 | # coding: utf-8
# django imports
from django.contrib.sessions.models import Session
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.template import RequestContext
from django.conf import settings
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured
# filebrowser imports
from filebrowser.functions import get_path, get_file
from filebrowser.templatetags.fb_tags import query_helper
def flash_login_required(function):
    """
    Decorator to recognize a user by its session.
    Used for Flash-Uploading.

    Flash does not forward the browser's cookies, so the session key is
    read from the POST data and resolved to a user manually.
    """
    def decorator(request, *args, **kwargs):
        try:
            engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
        except ImportError:
            # Narrowed from a bare except: only an unimportable
            # SESSION_ENGINE should trigger the database fallback.
            import django.contrib.sessions.backends.db
            engine = django.contrib.sessions.backends.db
        session_data = engine.SessionStore(request.POST.get('session_key'))
        user_id = session_data['_auth_user_id']
        # will return 404 if the session ID does not resolve to a valid user
        request.user = get_object_or_404(User, pk=user_id)
        return function(request, *args, **kwargs)
    return decorator
def path_exists(function):
    """
    Check if the given path exists.

    Raises ImproperlyConfigured when the upload root itself is missing;
    redirects to the browse view with an error message when only the
    requested subdirectory is missing.
    """
    def decorator(request, *args, **kwargs):
        if get_path('') is None:
            # The DIRECTORY does not exist, raise an error to prevent eternal redirecting.
            # (Call form instead of the Python-2-only raise statement form.)
            raise ImproperlyConfigured(_("Error finding Upload-Folder (MEDIA_ROOT + DIRECTORY). Maybe it does not exist?"))
        if get_path(request.GET.get('dir', '')) is None:
            msg = _('The requested Folder does not exist.')
            messages.add_message(request, messages.ERROR, msg)
            redirect_url = reverse("fb_browse") + query_helper(request.GET, "", "dir")
            return HttpResponseRedirect(redirect_url)
        return function(request, *args, **kwargs)
    return decorator
def file_exists(function):
    """
    Check if the given file exists.

    Redirects to the browse view with an error message when the
    requested file cannot be found.
    """
    def decorator(request, *args, **kwargs):
        # 'is None' (identity) instead of '== None', consistent with path_exists.
        if get_file(request.GET.get('dir', ''), request.GET.get('filename', '')) is None:
            msg = _('The requested File does not exist.')
            messages.add_message(request, messages.ERROR, msg)
            redirect_url = reverse("fb_browse") + query_helper(request.GET, "", "dir")
            return HttpResponseRedirect(redirect_url)
        return function(request, *args, **kwargs)
    return decorator
| bsd-3-clause |
chalkchisel/suds | suds/xsd/sxbasic.py | 197 | 22829 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbasic} module provides classes that represent
I{basic} schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.xsd.sxbase import *
from suds.xsd.query import *
from suds.sax import splitPrefix, Namespace
from suds.transport import TransportError
from suds.reader import DocumentReader
from urlparse import urljoin
log = getLogger(__name__)
class RestrictionMatcher:
    """
    For use with L{NodeFinder} to match restriction.
    """
    def match(self, n):
        # True only for Restriction schema nodes.
        return isinstance(n, Restriction)
class TypedContent(Content):
    """
    Represents any I{typed} content.
    """
    def resolve(self, nobuiltin=False):
        """
        Resolve the XSD type referenced by this content.
        @param nobuiltin: When True, a builtin (primitive) result resolves
            back to self instead of the builtin type.
        @return: The resolved schema type, or self when unresolved/builtin.
        """
        qref = self.qref()
        if qref is None:
            # No type reference at all: nothing to resolve.
            return self
        # Resolution is cached per nobuiltin flag value.
        key = 'resolved:nb=%s' % nobuiltin
        cached = self.cache.get(key)
        if cached is not None:
            return cached
        result = self
        query = TypeQuery(qref)
        query.history = [self]
        log.debug('%s, resolving: %s\n using:%s', self.id, qref, query)
        resolved = query.execute(self.schema)
        if resolved is None:
            log.debug(self.schema)
            raise TypeNotFound(qref)
        # NOTE: the cache stores the directly-resolved node, not the
        # result of the recursive resolve below.
        self.cache[key] = resolved
        if resolved.builtin():
            if nobuiltin:
                result = self
            else:
                result = resolved
        else:
            # Non-builtin types may themselves reference another type.
            result = resolved.resolve(nobuiltin)
        return result
    def qref(self):
        """
        Get the I{type} qualified reference to the referenced xsd type.
        This method takes into account simple types defined through
        restriction with are detected by determining that self is simple
        (len=0) and by finding a restriction child.
        @return: The I{type} qualified reference.
        @rtype: qref
        """
        qref = self.type
        if qref is None and len(self) == 0:
            # Simple node: look for a nested <restriction/> and use its base.
            ls = []
            m = RestrictionMatcher()
            finder = NodeFinder(m, 1)
            finder.find(self, ls)
            if len(ls):
                return ls[0].ref
        return qref
class Complex(SchemaObject):
    """
    Represents an (xsd) schema <xs:complexType/> node.
    @cvar childtags: A list of valid child node names
    @type childtags: (I{str},...)
    """
    def childtags(self):
        return ('attribute', 'attributeGroup', 'sequence', 'all', 'choice',
                'complexContent', 'simpleContent', 'any', 'group')
    def description(self):
        return ('name',)
    def extension(self):
        # A complex type is an extension when any raw child is.
        return any(c.extension() for c in self.rawchildren)
    def mixed(self):
        # Mixed when any simpleContent child reports mixed content.
        return any(isinstance(c, SimpleContent) and c.mixed()
                   for c in self.rawchildren)
class Group(SchemaObject):
    """
    Represents an (xsd) schema <xs:group/> node.
    @cvar childtags: A list of valid child node names
    @type childtags: (I{str},...)
    """
    def childtags(self):
        return ('sequence', 'all', 'choice')
    def dependencies(self):
        """Resolve the referenced group (when I{ref} is set).
        @return: (merge-index, [dependencies])
        """
        deps = []
        midx = None
        if self.ref is not None:
            query = GroupQuery(self.ref)
            g = query.execute(self.schema)
            if g is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(g)
            # Merge with the first (only) dependency.
            midx = 0
        return (midx, deps)
    def merge(self, other):
        # Adopt the referenced group's children wholesale.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren
    def description(self):
        return ('name', 'ref',)
class AttributeGroup(SchemaObject):
    """
    Represents an (xsd) schema <xs:attributeGroup/> node.
    @cvar childtags: A list of valid child node names
    @type childtags: (I{str},...)
    """
    def childtags(self):
        return ('attribute', 'attributeGroup')
    def dependencies(self):
        """Resolve the referenced attribute group (when I{ref} is set).
        @return: (merge-index, [dependencies])
        """
        deps = []
        midx = None
        if self.ref is not None:
            query = AttrGroupQuery(self.ref)
            ag = query.execute(self.schema)
            if ag is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(ag)
            # Merge with the first (only) dependency.
            midx = 0
        return (midx, deps)
    def merge(self, other):
        # Adopt the referenced group's children wholesale.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren
    def description(self):
        return ('name', 'ref',)
class Simple(SchemaObject):
    """
    Represents an (xsd) schema <xs:simpleType/> node
    """
    def childtags(self):
        return ('restriction', 'any', 'list',)
    def enum(self):
        # Enumerated when any descendant is an Enumeration node.
        return any(isinstance(child, Enumeration)
                   for child, ancestry in self.children())
    def mixed(self):
        return len(self)
    def description(self):
        return ('name',)
    def extension(self):
        return any(c.extension() for c in self.rawchildren)
    def restriction(self):
        return any(c.restriction() for c in self.rawchildren)
class List(SchemaObject):
    """
    Represents an (xsd) schema <xs:list/> node
    """
    def childtags(self):
        # Lists carry no relevant child tags.
        return ()
    def description(self):
        return ('name',)
    def xslist(self):
        # Marker: this object models an <xs:list/>.
        return True
class Restriction(SchemaObject):
    """
    Represents an (xsd) schema <xs:restriction/> node
    """
    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        # The restricted (base) type is referenced via the 'base' attribute.
        self.ref = root.get('base')
    def childtags(self):
        return ('enumeration', 'attribute', 'attributeGroup')
    def dependencies(self):
        """Resolve the base type; non-builtin bases become merge dependencies."""
        deps = []
        midx = None
        if self.ref is not None:
            base = TypeQuery(self.ref).execute(self.schema)
            if base is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            if not base.builtin():
                deps.append(base)
                midx = 0
        return (midx, deps)
    def restriction(self):
        # Marker: this node restricts a base type.
        return True
    def merge(self, other):
        SchemaObject.merge(self, other)
        flt = Filter(False, self.rawchildren)
        self.prepend(self.rawchildren, other.rawchildren, flt)
    def description(self):
        return ('ref',)
class Collection(SchemaObject):
    """
    Represents an (xsd) schema collection node:
    - sequence
    - choice
    - all
    """
    def childtags(self):
        return ('element', 'sequence', 'all', 'choice', 'any', 'group')
class Sequence(Collection):
    """
    Represents an (xsd) schema <xs:sequence/> node.
    """
    def sequence(self):
        # Marker: ordered collection.
        return True
class All(Collection):
    """
    Represents an (xsd) schema <xs:all/> node.
    """
    def all(self):
        # Marker: unordered, all-required collection.
        return True
class Choice(Collection):
    """
    Represents an (xsd) schema <xs:choice/> node.
    """
    def choice(self):
        # Marker: exactly-one-of collection.
        return True
class ComplexContent(SchemaObject):
    """
    Represents an (xsd) schema <xs:complexContent/> node.
    """
    def childtags(self):
        return ('attribute', 'attributeGroup', 'extension', 'restriction')
    def extension(self):
        # Extension/restriction status is delegated to the raw children.
        return any(c.extension() for c in self.rawchildren)
    def restriction(self):
        return any(c.restriction() for c in self.rawchildren)
class SimpleContent(SchemaObject):
    """
    Represents an (xsd) schema <xs:simpleContent/> node.
    """
    def childtags(self):
        return ('extension', 'restriction')
    def extension(self):
        # Extension/restriction status is delegated to the raw children.
        return any(c.extension() for c in self.rawchildren)
    def restriction(self):
        return any(c.restriction() for c in self.rawchildren)
    def mixed(self):
        # Non-empty simple content is treated as mixed.
        return len(self)
class Enumeration(Content):
    """
    Represents an (xsd) schema <xs:enumeration/> node
    """
    def __init__(self, schema, root):
        Content.__init__(self, schema, root)
        # The enumeration's 'value' attribute doubles as its name.
        self.name = root.get('value')
    def enum(self):
        return True
class Element(TypedContent):
    """
    Represents an (xsd) schema <xs:element/> node.
    """
    def __init__(self, schema, root):
        TypedContent.__init__(self, schema, root)
        a = root.get('form')
        if a is not None:
            # An element-level 'form' overrides the schema default.
            self.form_qualified = ( a == 'qualified' )
        a = self.root.get('nillable')
        if a is not None:
            self.nillable = ( a in ('1', 'true') )
        self.implany()
    def implany(self):
        """
        Set the type as any when implicit.
        An implicit <xs:any/> is when an element has not
        body and no type defined.
        @return: self
        @rtype: L{Element}
        """
        if self.type is None and \
            self.ref is None and \
            self.root.isempty():
            self.type = self.anytype()
        return self
    def childtags(self):
        return ('attribute', 'simpleType', 'complexType', 'any',)
    def extension(self):
        for c in self.rawchildren:
            if c.extension():
                return True
        return False
    def restriction(self):
        for c in self.rawchildren:
            if c.restriction():
                return True
        return False
    def dependencies(self):
        """Resolve the referenced element (when I{ref} is set).
        @return: (merge-index, [dependencies])
        """
        deps = []
        midx = None
        if self.ref is not None:
            query = ElementQuery(self.ref)
            e = query.execute(self.schema)
            if e is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(e)
            midx = 0
        return (midx, deps)
    def merge(self, other):
        # Adopt the referenced element's children wholesale.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren
    def description(self):
        return ('name', 'ref', 'type')
    def anytype(self):
        """ create an xsd:anyType reference """
        p,u = Namespace.xsdns
        mp = self.root.findPrefix(u)
        if mp is None:
            # XSD namespace not mapped yet: add the default prefix.
            mp = p
            self.root.addPrefix(p, u)
        return ':'.join((mp, 'anyType'))
class Extension(SchemaObject):
    """
    Represents an (xsd) schema <xs:extension/> node.
    """
    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        # The extended (base) type is referenced via the 'base' attribute.
        self.ref = root.get('base')
    def childtags(self):
        return ('attribute', 'attributeGroup', 'sequence', 'all',
                'choice', 'group')
    def dependencies(self):
        """Resolve the base type; non-builtin bases become merge dependencies."""
        deps = []
        midx = None
        if self.ref is not None:
            base = TypeQuery(self.ref).execute(self.schema)
            if base is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            if not base.builtin():
                deps.append(base)
                midx = 0
        return (midx, deps)
    def merge(self, other):
        SchemaObject.merge(self, other)
        flt = Filter(False, self.rawchildren)
        self.prepend(self.rawchildren, other.rawchildren, flt)
    def extension(self):
        return ( self.ref is not None )
    def description(self):
        return ('ref',)
class Import(SchemaObject):
    """
    Represents an (xsd) schema <xs:import/> node
    @cvar locations: A dictionary of namespace locations.
    @type locations: dict
    @ivar ns: The imported namespace.
    @type ns: str
    @ivar location: The (optional) location.
    @type location: namespace-uri
    @ivar opened: Opened and I{imported} flag.
    @type opened: boolean
    """
    locations = {}
    @classmethod
    def bind(cls, ns, location=None):
        """
        Bind a namespace to a schema location (URI).
        This is used for imports that don't specify a schemaLocation.
        @param ns: A namespace-uri.
        @type ns: str
        @param location: The (optional) schema location for the
            namespace. (default=ns).
        @type location: str
        """
        if location is None:
            location = ns
        cls.locations[ns] = location
    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        self.ns = (None, root.get('namespace'))
        self.location = root.get('schemaLocation')
        if self.location is None:
            # Fall back to a statically bound location for this namespace.
            self.location = self.locations.get(self.ns[1])
        self.opened = False
    def open(self, options):
        """
        Open and import the refrenced schema.
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The referenced schema.
        @rtype: L{Schema}
        """
        if self.opened:
            # Guard against import cycles: only open once.
            return
        self.opened = True
        log.debug('%s, importing ns="%s", location="%s"', self.id, self.ns[1], self.location)
        result = self.locate()
        if result is None:
            if self.location is None:
                log.debug('imported schema (%s) not-found', self.ns[1])
            else:
                result = self.download(options)
        log.debug('imported:\n%s', result)
        return result
    def locate(self):
        """ find the schema locally """
        if self.ns[1] == self.schema.tns[1]:
            # Importing our own namespace: nothing to locate.
            return None
        else:
            return self.schema.locate(self.ns)
    def download(self, options):
        """ download the schema """
        url = self.location
        try:
            if '://' not in url:
                # Relative locations resolve against the importing schema.
                url = urljoin(self.schema.baseurl, url)
            reader = DocumentReader(options)
            d = reader.open(url)
            root = d.root()
            root.set('url', url)
            return self.schema.instance(root, url, options)
        except TransportError:
            msg = 'imported schema (%s) at (%s), failed' % (self.ns[1], url)
            log.error('%s, %s', self.id, msg, exc_info=True)
            raise Exception(msg)
    def description(self):
        return ('ns', 'location')
class Include(SchemaObject):
    """
    Represents an (xsd) schema <xs:include/> node
    @ivar location: The (optional) location.
    @type location: namespace-uri
    @ivar opened: Opened and I{imported} flag.
    @type opened: boolean
    """
    locations = {}
    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        self.location = root.get('schemaLocation')
        if self.location is None:
            # NOTE(review): unlike Import, nothing ever populates
            # Include.locations, and self.ns is presumably inherited from
            # SchemaObject -- confirm before relying on this fallback.
            self.location = self.locations.get(self.ns[1])
        self.opened = False
    def open(self, options):
        """
        Open and include the refrenced schema.
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The referenced schema.
        @rtype: L{Schema}
        """
        if self.opened:
            # Guard against include cycles: only open once.
            return
        self.opened = True
        log.debug('%s, including location="%s"', self.id, self.location)
        result = self.download(options)
        log.debug('included:\n%s', result)
        return result
    def download(self, options):
        """ download the schema """
        url = self.location
        try:
            if '://' not in url:
                # Relative locations resolve against the including schema.
                url = urljoin(self.schema.baseurl, url)
            reader = DocumentReader(options)
            d = reader.open(url)
            root = d.root()
            root.set('url', url)
            self.__applytns(root)
            return self.schema.instance(root, url, options)
        except TransportError:
            msg = 'include schema at (%s), failed' % url
            log.error('%s, %s', self.id, msg, exc_info=True)
            raise Exception(msg)
    def __applytns(self, root):
        """ make sure included schema has same tns. """
        TNS = 'targetNamespace'
        tns = root.get(TNS)
        if tns is None:
            # Chameleon include: adopt the including schema's namespace.
            tns = self.schema.tns[1]
            root.set(TNS, tns)
        else:
            if self.schema.tns[1] != tns:
                # Call form instead of the Python-2-only raise statement.
                raise Exception('%s mismatch' % TNS)
    def description(self):
        # FIX: ('location') was a bare string, not a tuple; every other
        # description() implementation returns a tuple of attribute names.
        return ('location',)
class Attribute(TypedContent):
    """
    Represents an (xsd) <attribute/> node
    """
    def __init__(self, schema, root):
        TypedContent.__init__(self, schema, root)
        self.use = root.get('use', default='')
    def childtags(self):
        return ('restriction',)
    def isattr(self):
        # Marker: this content is an attribute (not an element).
        return True
    def get_default(self):
        """
        Gets the <xs:attribute default=""/> attribute value.
        @return: The default value for the attribute
        @rtype: str
        """
        return self.root.get('default', default='')
    def optional(self):
        # Only use="required" makes an attribute mandatory.
        return ( self.use != 'required' )
    def dependencies(self):
        """Resolve the referenced attribute (when I{ref} is set).
        @return: (merge-index, [dependencies])
        """
        deps = []
        midx = None
        if self.ref is not None:
            query = AttrQuery(self.ref)
            a = query.execute(self.schema)
            if a is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(a)
            midx = 0
        return (midx, deps)
    def description(self):
        return ('name', 'ref', 'type')
class Any(Content):
    """
    Represents an (xsd) <any/> node
    """
    def get_child(self, name):
        # <any/> matches every child name: synthesize a child on demand.
        root = self.root.clone()
        root.set('note', 'synthesized (any) child')
        child = Any(self.schema, root)
        return (child, [])
    def get_attribute(self, name):
        # <any/> matches every attribute name: synthesize one on demand.
        root = self.root.clone()
        root.set('note', 'synthesized (any) attribute')
        attribute = Any(self.schema, root)
        return (attribute, [])
    def any(self):
        return True
class Factory:
    """
    @cvar tags: A factory to create object objects based on tag.
    @type tags: {tag:fn,}
    """
    # Maps an XSD element tag name to the class that models it.
    tags = {
        'import' : Import,
        'include' : Include,
        'complexType' : Complex,
        'group' : Group,
        'attributeGroup' : AttributeGroup,
        'simpleType' : Simple,
        'list' : List,
        'element' : Element,
        'attribute' : Attribute,
        'sequence' : Sequence,
        'all' : All,
        'choice' : Choice,
        'complexContent' : ComplexContent,
        'simpleContent' : SimpleContent,
        'restriction' : Restriction,
        'enumeration' : Enumeration,
        'extension' : Extension,
        'any' : Any,
    }
    @classmethod
    def maptag(cls, tag, fn):
        """
        Map (override) tag => I{class} mapping.
        @param tag: An xsd tag name.
        @type tag: str
        @param fn: A function or class.
        @type fn: fn|class.
        """
        cls.tags[tag] = fn
    @classmethod
    def create(cls, root, schema):
        """
        Create an object based on the root tag name.
        @param root: An XML root element.
        @type root: L{Element}
        @param schema: A schema object.
        @type schema: L{schema.Schema}
        @return: The created object.
        @rtype: L{SchemaObject}
        """
        fn = cls.tags.get(root.name)
        if fn is not None:
            return fn(schema, root)
        else:
            # Unrecognized tags are skipped by the caller.
            return None
    @classmethod
    def build(cls, root, schema, filter=('*',)):
        """
        Build an xsobject representation.
        @param root: An schema XML root.
        @type root: L{sax.element.Element}
        @param filter: A tag filter.
        @type filter: [str,...]
        @return: A schema object graph.
        @rtype: L{sxbase.SchemaObject}
        """
        children = []
        for node in root.getChildren(ns=Namespace.xsdns):
            if '*' in filter or node.name in filter:
                child = cls.create(node, schema)
                if child is None:
                    continue
                children.append(child)
                # Recurse using the child's own list of valid tags.
                c = cls.build(node, schema, child.childtags())
                child.rawchildren = c
        return children
    @classmethod
    def collate(cls, children):
        """Partition children into imports, attributes, elements, types,
        groups and attribute groups; imports are removed from children."""
        imports = []
        elements = {}
        attributes = {}
        types = {}
        groups = {}
        agrps = {}
        for c in children:
            if isinstance(c, (Import, Include)):
                imports.append(c)
                continue
            if isinstance(c, Attribute):
                attributes[c.qname] = c
                continue
            if isinstance(c, Element):
                elements[c.qname] = c
                continue
            if isinstance(c, Group):
                groups[c.qname] = c
                continue
            if isinstance(c, AttributeGroup):
                agrps[c.qname] = c
                continue
            # Anything else is a (simple/complex) type definition.
            types[c.qname] = c
        for i in imports:
            # Imports/includes are processed separately.
            children.remove(i)
        return (children, imports, attributes, elements, types, groups, agrps)
#######################################################
# Static Import Bindings :-(
#######################################################
# Map well-known namespaces to fetchable schema locations, for imports
# that omit a schemaLocation attribute.
Import.bind(
    'http://schemas.xmlsoap.org/soap/encoding/',
    'suds://schemas.xmlsoap.org/soap/encoding/')
Import.bind(
    'http://www.w3.org/XML/1998/namespace',
    'http://www.w3.org/2001/xml.xsd')
Import.bind(
    'http://www.w3.org/2001/XMLSchema',
    'http://www.w3.org/2001/XMLSchema.xsd')
| lgpl-3.0 |
nyalldawson/QGIS | scripts/context_help_id.py | 25 | 2268 | #!/usr/bin/env python3
"""
/***************************************************************************
context_help_id.py
-------------------
begin : 2009-11-16
copyright : (C) 2009 by Gary E.Sherman
email : gsherman at mrcc.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script generates a unique context id based for use in the QGIS
context sensitive help system. It uses the SHA1 hash for the class name
and converts the first 12 characters to a unique integer.
To create a context id, pass the name of the QGIS class on the command line.
Example:
./context_help_id.py QgsAbout
This script requires Python 2.5 or higher (hashlib was introduced at 2.5).
NOTE: Due to a change in the way context ids are generated, ids
generated by the old method (Java hashCode function) will be different than
the id generated by the new method for the same class.
"""
import hashlib
import sys
# check to see if a class name was specified and if so, create the context id
if len(sys.argv) > 1:
    hash = hashlib.sha1()
    # set the hash to the name passed on the command line
    # (encode first: sha1 requires bytes on Python 3, matching the shebang)
    hash.update(sys.argv[1].encode('utf-8'))
    # generate the context id by converting the first 12 characters of the hash
    # to decimal
    context_id = int(hash.hexdigest()[:12], 16)
    # print the result (print() function: the old statement form is py2-only)
    print(context_id)
else:
    # if no class name was specified, give a bit of help
    print("To generate a context sensitive help id, specify the QGIS class name on the command line")
| gpl-2.0 |
kikusu/chainer | tests/chainer_tests/functions_tests/array_tests/test_copy.py | 5 | 2443 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
def _to_gpu(x, device_id):
if device_id >= 0:
return cuda.to_gpu(x, device_id)
else:
return x
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class Copy(unittest.TestCase):
    """Tests for ``functions.copy`` across CPU/GPU device combinations."""

    def setUp(self):
        self.x_data = numpy.random.uniform(
            -1, 1, (10, 5)).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype)

    def check_forward(self, src_id, dst_id):
        # Copying must preserve dtype and values on the destination device.
        x_data = _to_gpu(self.x_data, src_id)
        x = chainer.Variable(x_data)
        y = functions.copy(x, dst_id)
        self.assertEqual(self.x_data.dtype, self.dtype)
        numpy.testing.assert_array_equal(self.x_data, cuda.to_cpu(y.data))

    def check_backward(self, src_id, dst_id):
        # The gradient must flow back unchanged to the source device.
        x_data = _to_gpu(self.x_data, src_id)
        x = chainer.Variable(x_data)
        y = functions.copy(x, dst_id)
        gy = _to_gpu(self.gy, dst_id)
        y.grad = gy
        y.backward()
        x_grad = x.grad
        numpy.testing.assert_array_equal(
            cuda.to_cpu(x_grad), self.gy)

    def test_forward_cpu(self):
        self.check_forward(-1, -1)

    def test_backward_cpu(self):
        self.check_backward(-1, -1)

    @attr.gpu
    def test_forward_gpu(self):
        device_id = cuda.Device().id
        self.check_forward(device_id, device_id)

    @attr.gpu
    def test_check_backward_gpu(self):
        device_id = cuda.Device().id
        # BUG FIX: this previously called check_forward, duplicating
        # test_forward_gpu and never exercising the GPU backward path.
        self.check_backward(device_id, device_id)

    @attr.gpu
    def test_forward_cpu_to_gpu(self):
        device_id = cuda.Device().id
        self.check_forward(-1, device_id)

    @attr.gpu
    def test_backward_cpu_to_gpu(self):
        device_id = cuda.Device().id
        self.check_backward(-1, device_id)

    @attr.gpu
    def test_forward_gpu_to_cpu(self):
        device_id = cuda.Device().id
        self.check_forward(device_id, -1)

    @attr.gpu
    def test_backward_gpu_to_cpu(self):
        device_id = cuda.Device().id
        self.check_backward(device_id, -1)

    @attr.multi_gpu(2)
    def test_forward_multigpu(self):
        self.check_forward(0, 1)

    @attr.multi_gpu(2)
    def test_backward_multigpu(self):
        self.check_backward(0, 1)
# Let Chainer's test harness discover and run this module's tests.
testing.run_module(__name__, __file__)
| mit |
phoebusliang/parallel-lettuce | tests/integration/lib/Django-1.2.5/django/conf/locale/ka/formats.py | 80 | 1610 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Georgian (ka) locale formats. Display formats use Django's date-format
# syntax; input formats use strptime syntax and are tried in order.
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i:s a'
DATETIME_FORMAT = 'j F, Y h:i:s a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # (Monday)
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%d %b %Y', '%d %b, %Y', '%d %b. %Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
    # '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
| gpl-3.0 |
deepmind/pysc2 | pysc2/bin/valid_actions.py | 1 | 1964 | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print the valid actions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point_flag
# Command-line configuration: action resolutions and optional filtering.
FLAGS = flags.FLAGS
point_flag.DEFINE_point("screen_size", "84", "Resolution for screen actions.")
point_flag.DEFINE_point("minimap_size", "64", "Resolution for minimap actions.")
flags.DEFINE_bool("hide_specific", False, "Hide the specific actions")
def main(unused_argv):
    """Print the valid actions."""
    feats = features.Features(
        # Actually irrelevant whether it's feature or rgb size.
        features.AgentInterfaceFormat(
            feature_dimensions=features.Dimensions(
                screen=FLAGS.screen_size,
                minimap=FLAGS.minimap_size)))
    action_spec = feats.action_spec()
    flattened = 0
    count = 0
    for func in action_spec.functions:
        # Optionally skip actions that specialize a general action.
        if FLAGS.hide_specific and actions.FUNCTIONS[func.id].general_id != 0:
            continue
        count += 1
        # The flattened size of an action is the product of its argument sizes.
        act_flat = 1
        for arg in func.args:
            for size in arg.sizes:
                act_flat *= size
        flattened += act_flat
        print(func.str(True))
    print("Total base actions:", count)
    print("Total possible actions (flattened):", flattened)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_geometries/geometry/mesh/_reference/_reference_input_order/_reference_input_order.py | 2 | 3815 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
# (Empty values mean the judge performs no tag/attribute/data checks.)
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
    """Judge for this COLLADA conformance test case.

    Baseline: the standard steps ran without crashing; superior: the
    rendered images also match between import and export; exemplary:
    same criteria as superior.
    """
    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()
    def JudgeBaseline(self, context):
        # No step should not crash
        self.__assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline
    # To pass intermediate you need to pass basic, this object could also include additional
    # tests that were specific to the intermediate badge.
    def JudgeSuperior(self, context):
        # if baseline fails, no point in further checking
        if (self.status_baseline == False):
            self.status_superior = self.status_baseline
            return self.status_superior
        # Compare the rendered images between import and export
        self.__assistant.CompareRenderedImages(context)
        self.status_superior = self.__assistant.DeferJudgement(context)
        return self.status_superior
    # To pass advanced you need to pass intermediate, this object could also include additional
    # tests that were specific to the advanced badge
    def JudgeExemplary(self, context):
        self.status_exemplary = self.status_superior
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit |
benchmark-subsetting/adaptive-sampling-kit | common/tree.py | 2 | 9522 | # Copyright (c) 2011-2012, Universite de Versailles St-Quentin-en-Yvelines
#
# This file is part of ASK. ASK is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import pickle
import tempfile
from colors import colors
from regression import linear
def tree2dot(outfilename, tree, axes, categories):
    """
    Dumps a tree in dotty format.

    outfilename : name of the output dot file
    tree : the tree to dump
    axes : a list of strings containing the label for the axes
    categories : a list of dictionnaries that returns for each
                 categorical axis, the labels of each category.
                 For instance, if dimension 3 contains three
                 classes: 1-> blue, 2-> red, 3 -> green
                 then categories[3] = {1:"blue", 2:"red", 3:"green"}
    """
    # A context manager guarantees the file is closed even when
    # tree.todot() raises (the original leaked the handle in that case).
    with open(outfilename, "w") as f:
        f.write("graph decisionT {\n" + tree.todot(axes, categories) + "\n}")
def tree2png(outfilename, tree, axes, categories):
    """
    Dumps a tree in png.

    outfilename : name of the output png file
    tree : the tree to dump
    axes : a list of strings containing the label for the axes
    categories : a list of dictionnaries that returns for each
                 categorical axis, the labels of each category.
                 For instance, if dimension 3 contains three
                 classes: 1-> blue, 2-> red, 3 -> green
                 then categories[3] = {1:"blue", 2:"red", 3:"green"}
    """
    # Render through an intermediate dot file handed to graphviz's
    # `dot` tool; the temporary file is removed on context exit.
    with tempfile.NamedTemporaryFile() as dotfile:
        tree2dot(dotfile.name, tree, axes, categories)
        os.system("dot -Tpng -o {0} {1}".format(outfilename, dotfile.name))
def Lformat(L, pl=6):
    """
    Line format utility function.

    L: a list of tags.
    pl: the number of tags per line
    returns a string representation of the tags with exactly pl tags
    per line.
    """
    tokens = []
    for position, tag in enumerate(L, start=1):
        tokens.append(tag)
        # After every pl-th tag, emit an escaped newline marker
        # (it becomes its own comma-separated token in the output).
        if position % pl == 0:
            tokens.append("\\n")
    return ",".join(tokens)
def tag_leaves(tree):
    """
    Decorates the leaves of a tree with consecutive numeric tags
    going from 0 to NUMBER_OF_LEAVES-1.
    The tag is added inside each leaf object in parameter tag
    (leaf.tag).
    Returns NUMBER_OF_LEAVES-1.

    As an example consider this three leaved tree:
    >>> T = Node(Node(Leaf([0]),\
                      Leaf([1]),\
                      0,0,[]),\
                 Leaf([2]),\
                 0,0,[])
    >>> tag_leaves(T)
    2
    >>> list(map(lambda l: l.tag, leaf_iterator(T)))
    [0, 1, 2]
    """
    # Walk the leaves depth-first, numbering them as we go.
    count = 0
    for leaf in leaf_iterator(tree):
        leaf.tag = count
        count += 1
    # An empty iteration yields -1, matching len(leaves) - 1.
    return count - 1
def leaf_iterator(tree):
    """
    Returns a generator that iterates over all leaves of a tree
    in depth first order.

    As an example consider this three leaved tree:
    >>> T = Node(Node(Leaf([0]),\
                      Leaf([1]),\
                      0,0,[]),\
                 Leaf([2]),\
                 0,0,[])
    >>> list(leaf_iterator(T))
    [L([0]), L([1]), L([2])]
    """
    if isinstance(tree, Leaf):
        yield tree
        return
    # Internal node: recurse left then right.
    for child in (tree.left, tree.right):
        for leaf in leaf_iterator(child):
            yield leaf
def node_iterator(tree):
    """
    Returns a generator that iterates over all nodes of a tree
    (internal nodes and leaves) in depth first order.
    """
    # Every node yields itself first (pre-order).
    yield tree
    if isinstance(tree, Leaf):
        return
    for child in (tree.left, tree.right):
        for node in node_iterator(child):
            yield node
def save_tree(T, output_file):
    """
    Save a tree T to a file.

    The file is opened in binary mode: pickle writes bytes, and the
    original text-mode ("w") open breaks under Python 3.
    """
    with open(output_file, "wb") as f:
        pickle.dump(T, f)
def load_tree(input_file):
    """
    Load a tree T from a file.

    Opened in binary mode ("rb"): pickle data is binary, and the
    original text-mode ("r") open breaks under Python 3.
    """
    with open(input_file, "rb") as f:
        return pickle.load(f)
class Node():
    """
    Regression tree internal node.
    """
    def __init__(self, left, right, axis, cut, model, categorical=False):
        """
        left, right (Node) : left and right subtrees
        axis (int) : axis over which this node splits the domain
        cut : cut point for this node
            * for ordinal data: a value v, the cut is done for
              (all x < v , all x >= v)
            * for categorical data: a 2-uplet containing the
              two classes. For example to separate odd and even
              categories ([1,3,5], [2,4,6]).
        model : the linear model associated to this subtree
        categorical : is this cut categorical ?
        """
        self.left = left
        self.right = right
        self.axis = axis
        self.model = model
        self.cut = cut
        self.categorical = categorical
        self.data = []

    def _subtree_for(self, point):
        """Return the child (left or right) whose region contains point.

        Shared dispatch used by fill/compute/whichnode/whichmodel,
        which previously each duplicated this logic.
        """
        p = point[self.axis]
        if self.categorical:
            is_left = p in self.cut[0]
        else:
            is_left = p < self.cut
        return self.left if is_left else self.right

    def fill(self, point):
        """
        point: array of coordinates for a point
        Records the point on this node, then forwards it to the child
        responsible for it.  (The previous docstring wrongly claimed a
        predicted value was returned; the actual return value is
        whatever the reached leaf's fill() returns.)
        """
        self.data.append(point)
        return self._subtree_for(point).fill(point)

    def compute(self, point):
        """
        point: array of coordinates for a point
        returns the value predicted by the tree for the
        given point.
        """
        return self._subtree_for(point).compute(point)

    def whichnode(self, point):
        """
        point: array of coordinates for a point
        returns the leaf responsible for modeling
        the passed point.
        """
        return self._subtree_for(point).whichnode(point)

    def whichmodel(self, point):
        """
        point: array of coordinates for a point
        returns the tag of the leaf responsible for modeling
        the passed point.
        """
        return self._subtree_for(point).whichmodel(point)

    def todot(self, axes=None, categories=None):
        """
        Internal function to output the tree in dot format.
        """
        out = []
        if self.categorical:
            # Edge labels list the category names on each side.
            labelsl = Lformat([categories[self.axis][i] for i in self.cut[0]])
            labelsr = Lformat([categories[self.axis][i] for i in self.cut[1]])
            out.append("{0} [label=\"{1}\"];"
                       .format(id(self), axes[self.axis]))
        else:
            labelsr = labelsl = ""
            out.append("{0} [label=\"{1} < {2}\"];"
                       .format(id(self), axes[self.axis], "%.2f" % self.cut))
        out.append(self.left.todot(axes, categories))
        out.append(self.right.todot(axes, categories))
        out.append("{0} -- {1} [label=\"{2}\"];"
                   .format(id(self), id(self.left), labelsl))
        out.append("{0} -- {1} [label=\"{2}\"];"
                   .format(id(self), id(self.right), labelsr))
        return "\n".join(out)
class Leaf(Node):
    """
    Regression tree leaf node.
    """
    def __init__(self, model, error=None):
        """
        model : the linear model associated to this leaf
        error : optional error estimate attached to the model
        """
        self.model = model
        self.error = error
        self.data = []
        self.future_points = 0
        # Assigned later by tag_leaves(); -1 means "not tagged yet".
        self.tag = -1

    def compute(self, point):
        """
        This function computes the prediction of Tree ''self'' on
        point ''point''.
        """
        regressors = len(self.model) - 1
        if regressors == 0:
            # Single-coefficient model: the prediction is constant.
            return self.model[0]
        # Feed the trailing coordinates of the point to the model.
        return linear(self.model, *point[-regressors:])

    def fill(self, point):
        """Record a data point that falls into this leaf."""
        self.data.append(point)

    def whichmodel(self, point=None):
        """Return the numeric tag identifying this leaf's model."""
        return self.tag

    def whichnode(self, point=None):
        """A leaf is responsible for its own region."""
        return self

    def __repr__(self):
        return "L({0})".format(repr(self.model))

    def todot(self, axes=None, categories=None):
        """Render this leaf as a dot 'record' box listing its model."""
        coeffs = "|".join(["%.4f" % v for v in self.model])
        label = "*%s*|" % self.tag + coeffs
        return (
            "{0} [label=\"{{{1}}}\", shape=\"record\",color=\"{2}\"];"
            .format(id(self), label, colors[self.tag % len(colors)]))
# Run the doctests embedded in this module's docstrings when the
# module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| gpl-2.0 |
class w2Grid:
    """Server-side helper for the w2ui grid widget.

    Translates w2ui grid AJAX requests (search / sort / paging dicts)
    into SQL executed over a DB-API connection using the qmark ('?')
    parameter style, and returns dicts in the w2ui wire format
    ({'status': ..., 'total': ..., 'records': [...]}).
    """

    def __init__(self, conn):
        """conn: an open DB-API connection (qmark paramstyle)."""
        self.conn = conn

    def getRecords(self, sql, request, cql=None):
        """Run `sql` with the search/sort/paging info from `request`.

        `sql` must contain the placeholders ~search~ and ~sort~ (or
        ~order~), which are replaced with the generated WHERE and
        ORDER BY fragments.  `cql` optionally overrides the generated
        COUNT query.  On failure the returned dict has status 'error'
        and a 'message' with the exception and the SQL.
        """
        sql_components = {'where': [], 'params': [], 'sort': []}
        if request.get('search', []):
            for search in request['search']:
                operator = "="
                field = search['field']  # TODO: protect from sql injection!!!
                value = [search['value']]
                op = search['operator'].lower()
                if op == "begins":
                    operator = "LIKE ?||'%%'"
                elif op == "ends":
                    operator = "LIKE '%%'||?"
                elif op == "contains":
                    operator = "LIKE '%%'||?||'%%'"
                elif op == "is":
                    operator = "= LOWER(?)"
                elif op == "between":
                    # Value is itself a pair [low, high].
                    value = value[0]
                    operator = "BETWEEN ? AND ?"
                elif op == "in":
                    # Value is itself a list of candidates.
                    value = value[0]
                    operator = "IN (%s)" % ','.join(['?'] * len(value))
                sql_components['where'].append("%s %s" % (field, operator))
                for v in value:
                    sql_components['params'].append(v)
        if request.get('sort', []):
            for sort in request['sort']:
                field = sort['field']  # TODO: protect from sql injection!!!
                dir_ = sort['direction']  # TODO: protect from sql injection!!!
                sql_components['sort'].append(field + ' ' + dir_)
        connector = ' %s ' % request.get('searchLogic', 'AND')  # TODO: protect from sql injection!!!
        where = connector.join(sql_components['where'])
        if not where:
            where = '1=1'
        sort = ",".join(sql_components['sort'])
        if not sort:
            sort = '1'
        sql = sql.replace("~search~", where)
        sql = sql.replace("~order~", "~sort~")
        sql = sql.replace("~sort~", sort)
        if not cql:
            cql = "SELECT count(1) FROM (%s) as grid_list_1" % sql
        # Paging defaults; ignore absent or non-numeric limit/offset.
        limit = 50
        offset = 0
        try:
            limit = abs(int(request['limit']))
        except (KeyError, TypeError, ValueError):
            pass
        try:
            offset = abs(int(request['offset']))
        except (KeyError, TypeError, ValueError):
            pass
        sql += " LIMIT %s OFFSET %s" % (limit, offset)
        data = {}
        try:
            cursor = self.conn.cursor()
            # count records
            cursor.execute(cql, sql_components['params'])
            data['status'] = 'success'
            data['total'] = cursor.fetchone()[0]
            # execute sql
            data['records'] = []
            rows = cursor.execute(sql, sql_components['params'])
            columns = [d[0] for d in cursor.description]
            # w2ui expects the key column to be named 'recid'.
            columns[0] = "recid"
            for row in rows:
                record = zip(columns, list(row))
                data['records'].append(dict(record))
        except Exception as e:  # was `except Exception, e` (Py2-only syntax)
            data['status'] = 'error'
            data['message'] = '%s\n%s' % (e, sql)
        return data

    def deleteRecords(self, table, keyField, request):
        """Delete the rows whose keyField is in request['selected']."""
        recs = request['selected']
        # TODO: protect table, keyField from sql injection!!!
        sql = "DELETE FROM %s WHERE %s IN (%s)" % (
            table, keyField, ','.join(['?'] * len(recs)))
        data = {}
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql, recs)
            self.conn.commit()
            data['status'] = 'success'
            data['message'] = ''
        except Exception as e:
            data['status'] = 'error'
            data['message'] = '%s\n%s' % (e, sql)
        return data

    def getRecord(self, sql, recid):
        """Fetch one row by key; return it as a dict minus the key column."""
        data = {}
        try:
            cursor = self.conn.cursor()
            # execute sql
            cursor.execute(sql, [recid])
            data['status'] = 'success'
            data['message'] = ''
            columns = [d[0] for d in cursor.description]
            row = cursor.fetchone()
            # Drop the first (key) column.  list() is required: under
            # Python 3 zip() is a lazy iterator and cannot be sliced.
            record = list(zip(columns, list(row)))[1:]
            data['record'] = dict(record)
        except Exception as e:
            data['status'] = 'error'
            data['message'] = '%s\n%s' % (e, sql)
        return data

    def saveRecord(self, table, keyField, request):
        """Insert (recid == '0') or update a row from request['record'].

        Values starting with '__' are unescaped by stripping the
        prefix; empty strings are stored as NULL.
        """
        # TODO: protect table, keyField, field names from sql injection!!!
        fields, values = [], []
        for k, v in request['record'].items():
            if k == keyField:
                continue  # key field should not be here
            fields.append(k)
            if v.startswith('__'):
                v = v[2:]
            elif v == "":
                v = None
            values.append(v)
        # str() so that an integer 0 (as passed by newRecord) also
        # selects the INSERT branch; previously 0 != '0' sent every
        # newRecord() call down the broken UPDATE path.
        if str(request.get('recid', '0')) == '0':
            sql = "INSERT INTO %s (%s) VALUES (%s)" % (
                table, ','.join(fields), ','.join(['?'] * len(fields)))
        else:
            sql = "UPDATE %s SET %s WHERE %s = ?" % (
                table, ','.join(['%s=?' % f for f in fields]), keyField)
            values.append(request['recid'])
        data = {}
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql, values)
            self.conn.commit()
            data['status'] = 'success'
            data['message'] = ''
        except Exception as e:
            data['status'] = 'error'
            data['message'] = '%s\n%s' % (e, sql)
        return data

    def newRecord(self, table, data):
        """Convenience wrapper: insert `data` as a new row."""
        return self.saveRecord(table, '', {'recid': 0, 'record': data})

    def getItems(self, sql):
        # TODO: what's this function for?
        return {}
| mit |
royalharsh/flatbuffers | tests/py_test.py | 4 | 66237 | # coding=utf-8
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import sys
import imp
PY_VERSION = sys.version_info[:2]
import ctypes
from collections import defaultdict
import math
import random
import timeit
import unittest
from flatbuffers import compat
from flatbuffers import util
from flatbuffers.compat import range_func as compat_range
from flatbuffers.compat import NumpyRequiredForThisFeature
import flatbuffers
from flatbuffers import number_types as N
import MyGame # refers to generated code
import MyGame.Example # refers to generated code
import MyGame.Example.Any # refers to generated code
import MyGame.Example.Color # refers to generated code
import MyGame.Example.Monster # refers to generated code
import MyGame.Example.Test # refers to generated code
import MyGame.Example.Stat # refers to generated code
import MyGame.Example.Vec3 # refers to generated code
import MyGame.MonsterExtra # refers to generated code
import MyGame.Example.ArrayTable # refers to generated code
import MyGame.Example.ArrayStruct # refers to generated code
import MyGame.Example.NestedStruct # refers to generated code
import MyGame.Example.TestEnum # refers to generated code
def assertRaises(test_case, fn, exception_class):
    ''' Backwards-compatible assertion for exceptions raised. '''
    caught = None
    try:
        fn()
    except Exception as err:
        caught = err
    # Both checks go through the TestCase so failures are reported
    # with unittest's machinery: one for "something was raised", one
    # for "it was of the expected class".
    test_case.assertTrue(caught is not None)
    test_case.assertTrue(isinstance(caught, exception_class))
class TestWireFormat(unittest.TestCase):
    """End-to-end check that the generated Python code round-trips the
    canonical Monster test buffer (with and without size prefix)."""
    def test_wire_format(self):
        # Verify that using the generated Python code builds a buffer without
        # returning errors, and is interpreted correctly, for size prefixed
        # representation and regular:
        for sizePrefix in [True, False]:
            for file_identifier in [None, b"MONS"]:
                gen_buf, gen_off = make_monster_from_generated_code(sizePrefix=sizePrefix, file_identifier=file_identifier)
                CheckReadBuffer(gen_buf, gen_off, sizePrefix=sizePrefix, file_identifier=file_identifier)

        # Verify that the canonical flatbuffer file is readable by the
        # generated Python code. Note that context managers are not part of
        # Python 2.5, so we use the simpler open/close methods here:
        f = open('monsterdata_test.mon', 'rb')
        canonicalWireData = f.read()
        f.close()
        CheckReadBuffer(bytearray(canonicalWireData), 0, file_identifier=b'MONS')

        # Write the generated buffer out to a file:
        # (gen_buf/gen_off still hold the values from the last loop
        # iteration above.)
        f = open('monsterdata_python_wire.mon', 'wb')
        f.write(gen_buf[gen_off:])
        f.close()
def CheckReadBuffer(buf, offset, sizePrefix=False, file_identifier=None):
    ''' CheckReadBuffer checks that the given buffer is evaluated correctly
    as the example Monster. '''
    def asserter(stmt):
        ''' An assertion helper that is separated from TestCase classes. '''
        if not stmt:
            raise AssertionError('CheckReadBuffer case failed')

    if file_identifier:
        # test prior to removal of size_prefix
        asserter(util.GetBufferIdentifier(buf, offset, size_prefixed=sizePrefix) == file_identifier)
        asserter(util.BufferHasIdentifier(buf, offset, file_identifier=file_identifier, size_prefixed=sizePrefix))
    if sizePrefix:
        # The leading uint32 must equal the remaining buffer length.
        size = util.GetSizePrefix(buf, offset)
        asserter(size == len(buf[offset:])-4)
        buf, offset = util.RemoveSizePrefix(buf, offset)
    if file_identifier:
        asserter(MyGame.Example.Monster.Monster.MonsterBufferHasIdentifier(buf, offset))
    else:
        asserter(not MyGame.Example.Monster.Monster.MonsterBufferHasIdentifier(buf, offset))
    monster = MyGame.Example.Monster.Monster.GetRootAsMonster(buf, offset)

    asserter(monster.Hp() == 80)
    asserter(monster.Mana() == 150)
    asserter(monster.Name() == b'MyMonster')

    # initialize a Vec3 from Pos()
    vec = monster.Pos()
    asserter(vec is not None)

    # verify the properties of the Vec3
    asserter(vec.X() == 1.0)
    asserter(vec.Y() == 2.0)
    asserter(vec.Z() == 3.0)
    asserter(vec.Test1() == 3.0)
    asserter(vec.Test2() == 2)

    # initialize a Test from Test3(...)
    t = MyGame.Example.Test.Test()
    t = vec.Test3(t)
    asserter(t is not None)

    # verify the properties of the Test
    asserter(t.A() == 5)
    asserter(t.B() == 6)

    # verify that the enum code matches the enum declaration:
    union_type = MyGame.Example.Any.Any
    asserter(monster.TestType() == union_type.Monster)

    # initialize a Table from a union field Test(...)
    table2 = monster.Test()
    asserter(type(table2) is flatbuffers.table.Table)

    # initialize a Monster from the Table from the union
    monster2 = MyGame.Example.Monster.Monster()
    monster2.Init(table2.Bytes, table2.Pos)
    asserter(monster2.Name() == b"Fred")

    # iterate through the first monster's inventory:
    asserter(monster.InventoryLength() == 5)

    invsum = 0
    for i in compat_range(monster.InventoryLength()):
        v = monster.Inventory(i)
        invsum += int(v)
    asserter(invsum == 10)

    for i in range(5):
        asserter(monster.VectorOfLongs(i) == 10 ** (i * 2))

    asserter(([-1.7976931348623157e+308, 0, 1.7976931348623157e+308]
              == [monster.VectorOfDoubles(i)
                  for i in range(monster.VectorOfDoublesLength())]))

    try:
        imp.find_module('numpy')
        # if numpy exists, then we should be able to get the
        # vector as a numpy array
        import numpy as np

        asserter(monster.InventoryAsNumpy().sum() == 10)
        asserter(monster.InventoryAsNumpy().dtype == np.dtype('uint8'))

        VectorOfLongs = monster.VectorOfLongsAsNumpy()
        asserter(VectorOfLongs.dtype == np.dtype('int64'))
        for i in range(5):
            asserter(VectorOfLongs[i] == 10 ** (i * 2))

        VectorOfDoubles = monster.VectorOfDoublesAsNumpy()
        asserter(VectorOfDoubles.dtype == np.dtype('float64'))
        asserter(VectorOfDoubles[0] == np.finfo('float64').min)
        asserter(VectorOfDoubles[1] == 0.0)
        asserter(VectorOfDoubles[2] == np.finfo('float64').max)
    except ImportError:
        # If numpy does not exist, trying to get vector as numpy
        # array should raise NumpyRequiredForThisFeature. The way
        # assertRaises has been implemented prevents us from
        # asserting this error is raised outside of a test case.
        pass

    asserter(monster.Test4Length() == 2)

    # create a 'Test' object and populate it:
    test0 = monster.Test4(0)
    asserter(type(test0) is MyGame.Example.Test.Test)

    test1 = monster.Test4(1)
    asserter(type(test1) is MyGame.Example.Test.Test)

    # the position of test0 and test1 are swapped in monsterdata_java_wire
    # and monsterdata_test_wire, so ignore ordering
    v0 = test0.A()
    v1 = test0.B()
    v2 = test1.A()
    v3 = test1.B()
    sumtest12 = int(v0) + int(v1) + int(v2) + int(v3)
    asserter(sumtest12 == 100)

    asserter(monster.TestarrayofstringLength() == 2)
    asserter(monster.Testarrayofstring(0) == b"test1")
    asserter(monster.Testarrayofstring(1) == b"test2")

    asserter(monster.TestarrayoftablesLength() == 0)
    asserter(monster.TestnestedflatbufferLength() == 0)
    asserter(monster.Testempty() is None)
class TestFuzz(unittest.TestCase):
    ''' Low level stress/fuzz test: serialize/deserialize a variety of
    different kinds of data in different combinations '''

    binary_type = compat.binary_types[0]  # this will always exist
    ofInt32Bytes = binary_type([0x83, 0x33, 0x33, 0x33])
    ofInt64Bytes = binary_type([0x84, 0x44, 0x44, 0x44,
                                0x44, 0x44, 0x44, 0x44])
    overflowingInt32Val = flatbuffers.encode.Get(flatbuffers.packer.int32,
                                                 ofInt32Bytes, 0)
    overflowingInt64Val = flatbuffers.encode.Get(flatbuffers.packer.int64,
                                                 ofInt64Bytes, 0)

    # Values we're testing against: chosen to ensure no bits get chopped
    # off anywhere, and also be different from eachother.
    boolVal = True
    int8Val = N.Int8Flags.py_type(-127)  # 0x81
    uint8Val = N.Uint8Flags.py_type(0xFF)
    int16Val = N.Int16Flags.py_type(-32222)  # 0x8222
    uint16Val = N.Uint16Flags.py_type(0xFEEE)
    int32Val = N.Int32Flags.py_type(overflowingInt32Val)
    uint32Val = N.Uint32Flags.py_type(0xFDDDDDDD)
    int64Val = N.Int64Flags.py_type(overflowingInt64Val)
    uint64Val = N.Uint64Flags.py_type(0xFCCCCCCCCCCCCCCC)
    # Python uses doubles, so force it here
    float32Val = N.Float32Flags.py_type(ctypes.c_float(3.14159).value)
    float64Val = N.Float64Flags.py_type(3.14159265359)

    def test_fuzz(self):
        return self.check_once(11, 100)

    def check_once(self, fuzzFields, fuzzObjects):
        # Build fuzzObjects objects of fuzzFields fields each, with
        # pseudo-random field types, then read them all back and check
        # every stored value.  The same LCG seed is used for both
        # passes, so the type choices replay deterministically.
        testValuesMax = 11  # hardcoded to the number of scalar types

        builder = flatbuffers.Builder(0)
        l = LCG()
        objects = [0 for _ in compat_range(fuzzObjects)]

        # Generate fuzzObjects random objects each consisting of
        # fuzzFields fields, each of a random type.
        for i in compat_range(fuzzObjects):
            builder.StartObject(fuzzFields)

            for j in compat_range(fuzzFields):
                choice = int(l.Next()) % testValuesMax
                if choice == 0:
                    builder.PrependBoolSlot(int(j), self.boolVal, False)
                elif choice == 1:
                    builder.PrependInt8Slot(int(j), self.int8Val, 0)
                elif choice == 2:
                    builder.PrependUint8Slot(int(j), self.uint8Val, 0)
                elif choice == 3:
                    builder.PrependInt16Slot(int(j), self.int16Val, 0)
                elif choice == 4:
                    builder.PrependUint16Slot(int(j), self.uint16Val, 0)
                elif choice == 5:
                    builder.PrependInt32Slot(int(j), self.int32Val, 0)
                elif choice == 6:
                    builder.PrependUint32Slot(int(j), self.uint32Val, 0)
                elif choice == 7:
                    builder.PrependInt64Slot(int(j), self.int64Val, 0)
                elif choice == 8:
                    builder.PrependUint64Slot(int(j), self.uint64Val, 0)
                elif choice == 9:
                    builder.PrependFloat32Slot(int(j), self.float32Val, 0)
                elif choice == 10:
                    builder.PrependFloat64Slot(int(j), self.float64Val, 0)
                else:
                    raise RuntimeError('unreachable')

            off = builder.EndObject()

            # store the offset from the end of the builder buffer,
            # since it will keep growing:
            objects[i] = off

        # Do some bookkeeping to generate stats on fuzzes:
        stats = defaultdict(int)

        def check(table, desc, want, got):
            stats[desc] += 1
            self.assertEqual(want, got, "%s != %s, %s" % (want, got, desc))

        l = LCG()  # Reset.

        # Test that all objects we generated are readable and return the
        # expected values. We generate random objects in the same order
        # so this is deterministic.
        for i in compat_range(fuzzObjects):
            table = flatbuffers.table.Table(builder.Bytes,
                                            len(builder.Bytes) - objects[i])

            for j in compat_range(fuzzFields):
                field_count = flatbuffers.builder.VtableMetadataFields + j
                f = N.VOffsetTFlags.py_type(field_count *
                                            N.VOffsetTFlags.bytewidth)
                choice = int(l.Next()) % testValuesMax

                if choice == 0:
                    check(table, "bool", self.boolVal,
                          table.GetSlot(f, False, N.BoolFlags))
                elif choice == 1:
                    check(table, "int8", self.int8Val,
                          table.GetSlot(f, 0, N.Int8Flags))
                elif choice == 2:
                    check(table, "uint8", self.uint8Val,
                          table.GetSlot(f, 0, N.Uint8Flags))
                elif choice == 3:
                    check(table, "int16", self.int16Val,
                          table.GetSlot(f, 0, N.Int16Flags))
                elif choice == 4:
                    check(table, "uint16", self.uint16Val,
                          table.GetSlot(f, 0, N.Uint16Flags))
                elif choice == 5:
                    check(table, "int32", self.int32Val,
                          table.GetSlot(f, 0, N.Int32Flags))
                elif choice == 6:
                    check(table, "uint32", self.uint32Val,
                          table.GetSlot(f, 0, N.Uint32Flags))
                elif choice == 7:
                    check(table, "int64", self.int64Val,
                          table.GetSlot(f, 0, N.Int64Flags))
                elif choice == 8:
                    check(table, "uint64", self.uint64Val,
                          table.GetSlot(f, 0, N.Uint64Flags))
                elif choice == 9:
                    check(table, "float32", self.float32Val,
                          table.GetSlot(f, 0, N.Float32Flags))
                elif choice == 10:
                    check(table, "float64", self.float64Val,
                          table.GetSlot(f, 0, N.Float64Flags))
                else:
                    raise RuntimeError('unreachable')

        # If enough checks were made, verify that all scalar types were used:
        self.assertEqual(testValuesMax, len(stats),
                         "fuzzing failed to test all scalar types: %s" % stats)
class TestByteLayout(unittest.TestCase):
''' TestByteLayout checks the bytes of a Builder in various scenarios. '''
def assertBuilderEquals(self, builder, want_chars_or_ints):
    """Assert that the builder's used bytes equal the expected
    sequence, which may mix single-character strings and ints."""
    def integerize(x):
        # Single-character strings stand for their ordinal value.
        if isinstance(x, compat.string_types):
            return ord(x)
        return x

    want_ints = list(map(integerize, want_chars_or_ints))
    want = bytearray(want_ints)
    got = builder.Bytes[builder.Head():]  # use the buffer directly
    self.assertEqual(want, got)
def test_numbers(self):
    # Scalars are prepended little-endian, padded to their own
    # alignment; the buffer grows downward (newest bytes first).
    b = flatbuffers.Builder(0)
    self.assertBuilderEquals(b, [])
    b.PrependBool(True)
    self.assertBuilderEquals(b, [1])
    b.PrependInt8(-127)
    self.assertBuilderEquals(b, [129, 1])
    b.PrependUint8(255)
    self.assertBuilderEquals(b, [255, 129, 1])
    b.PrependInt16(-32222)
    self.assertBuilderEquals(b, [0x22, 0x82, 0, 255, 129, 1])  # first pad
    b.PrependUint16(0xFEEE)
    # no pad this time:
    self.assertBuilderEquals(b, [0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1])
    b.PrependInt32(-53687092)
    self.assertBuilderEquals(b, [204, 204, 204, 252, 0xEE, 0xFE,
                                 0x22, 0x82, 0, 255, 129, 1])
    b.PrependUint32(0x98765432)
    self.assertBuilderEquals(b, [0x32, 0x54, 0x76, 0x98,
                                 204, 204, 204, 252,
                                 0xEE, 0xFE, 0x22, 0x82,
                                 0, 255, 129, 1])

def test_numbers64(self):
    # 64-bit scalars are stored little-endian as well.
    b = flatbuffers.Builder(0)
    b.PrependUint64(0x1122334455667788)
    self.assertBuilderEquals(b, [0x88, 0x77, 0x66, 0x55,
                                 0x44, 0x33, 0x22, 0x11])

    b = flatbuffers.Builder(0)
    b.PrependInt64(0x1122334455667788)
    self.assertBuilderEquals(b, [0x88, 0x77, 0x66, 0x55,
                                 0x44, 0x33, 0x22, 0x11])
def test_1xbyte_vector(self):
    # Vectors carry a uint32 length prefix and are aligned to 4 bytes.
    b = flatbuffers.Builder(0)
    self.assertBuilderEquals(b, [])
    b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 1, 1)
    self.assertBuilderEquals(b, [0, 0, 0])  # align to 4bytes
    b.PrependByte(1)
    self.assertBuilderEquals(b, [1, 0, 0, 0])
    b.EndVector(1)
    self.assertBuilderEquals(b, [1, 0, 0, 0, 1, 0, 0, 0])  # padding

def test_2xbyte_vector(self):
    b = flatbuffers.Builder(0)
    b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 2, 1)
    self.assertBuilderEquals(b, [0, 0])  # align to 4bytes
    b.PrependByte(1)
    self.assertBuilderEquals(b, [1, 0, 0])
    b.PrependByte(2)
    self.assertBuilderEquals(b, [2, 1, 0, 0])
    b.EndVector(2)
    self.assertBuilderEquals(b, [2, 0, 0, 0, 2, 1, 0, 0])  # padding

def test_1xuint16_vector(self):
    b = flatbuffers.Builder(0)
    b.StartVector(flatbuffers.number_types.Uint16Flags.bytewidth, 1, 1)
    self.assertBuilderEquals(b, [0, 0])  # align to 4bytes
    b.PrependUint16(1)
    self.assertBuilderEquals(b, [1, 0, 0, 0])
    b.EndVector(1)
    self.assertBuilderEquals(b, [1, 0, 0, 0, 1, 0, 0, 0])  # padding

def test_2xuint16_vector(self):
    b = flatbuffers.Builder(0)
    b.StartVector(flatbuffers.number_types.Uint16Flags.bytewidth, 2, 1)
    self.assertBuilderEquals(b, [])  # align to 4bytes
    b.PrependUint16(0xABCD)
    self.assertBuilderEquals(b, [0xCD, 0xAB])
    b.PrependUint16(0xDCBA)
    self.assertBuilderEquals(b, [0xBA, 0xDC, 0xCD, 0xAB])
    b.EndVector(2)
    self.assertBuilderEquals(b, [2, 0, 0, 0, 0xBA, 0xDC, 0xCD, 0xAB])
def test_create_ascii_string(self):
    # Strings are length-prefixed, NUL-terminated, then padded to 4.
    b = flatbuffers.Builder(0)
    b.CreateString(u"foo", encoding='ascii')

    # 0-terminated, no pad:
    self.assertBuilderEquals(b, [3, 0, 0, 0, 'f', 'o', 'o', 0])

    b.CreateString(u"moop", encoding='ascii')
    # 0-terminated, 3-byte pad:
    self.assertBuilderEquals(b, [4, 0, 0, 0, 'm', 'o', 'o', 'p',
                                 0, 0, 0, 0,
                                 3, 0, 0, 0, 'f', 'o', 'o', 0])

def test_create_utf8_string(self):
    # The length prefix counts UTF-8 bytes, not characters.
    b = flatbuffers.Builder(0)
    b.CreateString(u"Цлїςσδε")
    self.assertBuilderEquals(b, "\x0e\x00\x00\x00\xd0\xa6\xd0\xbb\xd1\x97" \
        "\xcf\x82\xcf\x83\xce\xb4\xce\xb5\x00\x00")

    b.CreateString(u"フムアムカモケモ")
    self.assertBuilderEquals(b, "\x18\x00\x00\x00\xef\xbe\x8c\xef\xbe\x91" \
        "\xef\xbd\xb1\xef\xbe\x91\xef\xbd\xb6\xef\xbe\x93\xef\xbd\xb9\xef" \
        "\xbe\x93\x00\x00\x00\x00\x0e\x00\x00\x00\xd0\xa6\xd0\xbb\xd1\x97" \
        "\xcf\x82\xcf\x83\xce\xb4\xce\xb5\x00\x00")

def test_create_arbitrary_string(self):
    # Non-printable byte values round-trip through the default
    # utf-8 encoding as well.
    b = flatbuffers.Builder(0)
    s = "\x01\x02\x03"
    b.CreateString(s)  # Default encoding is utf-8.
    # 0-terminated, no pad:
    self.assertBuilderEquals(b, [3, 0, 0, 0, 1, 2, 3, 0])
    s2 = "\x04\x05\x06\x07"
    b.CreateString(s2)  # Default encoding is utf-8.
    # 0-terminated, 3-byte pad:
    self.assertBuilderEquals(b, [4, 0, 0, 0, 4, 5, 6, 7, 0, 0, 0, 0,
                                 3, 0, 0, 0, 1, 2, 3, 0])
def test_create_byte_vector(self):
    # Byte vectors: uint32 length prefix, then the raw bytes, padded
    # to 4-byte alignment.
    b = flatbuffers.Builder(0)
    b.CreateByteVector(b"")
    # 0-byte pad:
    self.assertBuilderEquals(b, [0, 0, 0, 0])

    b = flatbuffers.Builder(0)
    b.CreateByteVector(b"\x01\x02\x03")
    # 1-byte pad:
    self.assertBuilderEquals(b, [3, 0, 0, 0, 1, 2, 3, 0])
def test_create_numpy_vector_int8(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -3], dtype=np.int8)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 2, 256 - 3, 0 # vector value + padding
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 2, 256 - 3, 0 # vector value + padding
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_uint16(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, 312], dtype=np.uint16)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, # 1
2, 0, # 2
312 - 256, 1, # 312
0, 0 # padding
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, # 1
2, 0, # 2
312 - 256, 1, # 312
0, 0 # padding
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_int64(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -12], dtype=np.int64)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 0, 0, 0, 0, 0, 0, # 1
2, 0, 0, 0, 0, 0, 0, 0, # 2
256 - 12, 255, 255, 255, 255, 255, 255, 255 # -12
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 0, 0, 0, 0, 0, 0, # 1
2, 0, 0, 0, 0, 0, 0, 0, # 2
256 - 12, 255, 255, 255, 255, 255, 255, 255 # -12
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_float32(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -12], dtype=np.float32)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 128, 63, # 1
0, 0, 0, 64, # 2
0, 0, 64, 193 # -12
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 128, 63, # 1
0, 0, 0, 64, # 2
0, 0, 64, 193 # -12
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_float64(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -12], dtype=np.float64)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 0, 0, 0, 0, 240, 63, # 1
0, 0, 0, 0, 0, 0, 0, 64, # 2
0, 0, 0, 0, 0, 0, 40, 192 # -12
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 0, 0, 0, 0, 240, 63, # 1
0, 0, 0, 0, 0, 0, 0, 64, # 2
0, 0, 0, 0, 0, 0, 40, 192 # -12
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_bool(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([True, False, True], dtype=np.bool)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 1, 0 # vector values + padding
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 1, 0 # vector values + padding
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_reject_strings(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Create String array
b = flatbuffers.Builder(0)
x = np.array(["hello", "fb", "testing"])
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
TypeError)
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_reject_object(self):
    """CreateNumpyVector must raise TypeError for object-dtype arrays."""
    try:
        imp.find_module('numpy')
        # numpy is available: an array of arbitrary Python objects has no
        # flatbuffer scalar representation and must be rejected.
        import numpy as np

        builder = flatbuffers.Builder(0)
        obj_array = np.array([{"m": 0}, {"as": -2.1, 'c': 'c'}])
        assertRaises(
            self,
            lambda: builder.CreateNumpyVector(obj_array),
            TypeError)
    except ImportError:
        # numpy missing: the call fails earlier, with a dedicated error.
        builder = flatbuffers.Builder(0)
        not_an_array = 0
        assertRaises(
            self,
            lambda: builder.CreateNumpyVector(not_an_array),
            NumpyRequiredForThisFeature)
def test_empty_vtable(self):
    """An object with zero fields serializes to just the minimal vtable."""
    b = flatbuffers.Builder(0)
    b.StartObject(0)
    self.assertBuilderEquals(b, [])
    b.EndObject()
    self.assertBuilderEquals(b, [4, 0, 4, 0, 4, 0, 0, 0])
def test_vtable_with_one_true_bool(self):
    """A single non-default bool is written: one value byte, padded to 4."""
    b = flatbuffers.Builder(0)
    self.assertBuilderEquals(b, [])
    b.StartObject(1)
    self.assertBuilderEquals(b, [])
    b.PrependBoolSlot(0, True, False)
    b.EndObject()
    self.assertBuilderEquals(b, [
        6, 0,  # vtable bytes
        8, 0,  # length of object including vtable offset
        7, 0,  # start of bool value
        6, 0, 0, 0,  # offset for start of vtable (int32)
        0, 0, 0,  # padded to 4 bytes
        1,  # bool value
    ])
def test_vtable_with_one_default_bool(self):
    """A bool equal to its declared default is omitted from the object."""
    b = flatbuffers.Builder(0)
    self.assertBuilderEquals(b, [])
    b.StartObject(1)
    self.assertBuilderEquals(b, [])
    b.PrependBoolSlot(0, False, False)
    b.EndObject()
    self.assertBuilderEquals(b, [
        4, 0,  # vtable bytes
        4, 0,  # end of object from here
        # entry 1 is zero and not stored
        4, 0, 0, 0,  # offset for start of vtable (int32)
    ])
def test_vtable_with_one_int16(self):
    """A single int16 is written little-endian with two padding bytes."""
    b = flatbuffers.Builder(0)
    b.StartObject(1)
    b.PrependInt16Slot(0, 0x789A, 0)
    b.EndObject()
    self.assertBuilderEquals(b, [
        6, 0,  # vtable bytes
        8, 0,  # end of object from here
        6, 0,  # offset to value
        6, 0, 0, 0,  # offset for start of vtable (int32)
        0, 0,  # padding to 4 bytes
        0x9A, 0x78,
    ])
def test_vtable_with_two_int16(self):
    """Two int16s pack without padding; prepending means slot 1's bytes
    come before slot 0's in the finished object."""
    b = flatbuffers.Builder(0)
    b.StartObject(2)
    b.PrependInt16Slot(0, 0x3456, 0)
    b.PrependInt16Slot(1, 0x789A, 0)
    b.EndObject()
    self.assertBuilderEquals(b, [
        8, 0,  # vtable bytes
        8, 0,  # end of object from here
        6, 0,  # offset to value 0
        4, 0,  # offset to value 1
        8, 0, 0, 0,  # offset for start of vtable (int32)
        0x9A, 0x78,  # value 1
        0x56, 0x34,  # value 0
    ])
def test_vtable_with_int16_and_bool(self):
    """Mixed int16 + bool: the bool packs into the int16's padding."""
    b = flatbuffers.Builder(0)
    b.StartObject(2)
    b.PrependInt16Slot(0, 0x3456, 0)
    b.PrependBoolSlot(1, True, False)
    b.EndObject()
    self.assertBuilderEquals(b, [
        8, 0,  # vtable bytes
        8, 0,  # end of object from here
        6, 0,  # offset to value 0
        5, 0,  # offset to value 1
        8, 0, 0, 0,  # offset for start of vtable (int32)
        0,  # padding
        1,  # value 1
        0x56, 0x34,  # value 0
    ])
def test_vtable_with_empty_vector(self):
    """An empty vector still gets a length word; the table stores only
    the uoffset pointing at it."""
    b = flatbuffers.Builder(0)
    b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 0, 1)
    vecend = b.EndVector(0)
    b.StartObject(1)
    b.PrependUOffsetTRelativeSlot(0, vecend, 0)
    b.EndObject()
    self.assertBuilderEquals(b, [
        6, 0,  # vtable bytes
        8, 0,
        4, 0,  # offset to vector offset
        6, 0, 0, 0,  # offset for start of vtable (int32)
        4, 0, 0, 0,
        0, 0, 0, 0,  # length of vector (not in struct)
    ])
def test_vtable_with_empty_vector_of_byte_and_some_scalars(self):
    """An empty byte vector plus a scalar in the same table."""
    b = flatbuffers.Builder(0)
    b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 0, 1)
    vecend = b.EndVector(0)
    b.StartObject(2)
    b.PrependInt16Slot(0, 55, 0)
    b.PrependUOffsetTRelativeSlot(1, vecend, 0)
    b.EndObject()
    self.assertBuilderEquals(b, [
        8, 0,  # vtable bytes
        12, 0,
        10, 0,  # offset to value 0
        4, 0,  # offset to vector offset
        8, 0, 0, 0,  # vtable loc
        8, 0, 0, 0,  # value 1
        0, 0, 55, 0,  # value 0
        0, 0, 0, 0,  # length of vector (not in struct)
    ])
def test_vtable_with_1_int16_and_2vector_of_int16(self):
    """A scalar and a 2-element int16 vector in one table; vector
    elements appear reversed because they are prepended."""
    b = flatbuffers.Builder(0)
    b.StartVector(flatbuffers.number_types.Int16Flags.bytewidth, 2, 1)
    b.PrependInt16(0x1234)
    b.PrependInt16(0x5678)
    vecend = b.EndVector(2)
    b.StartObject(2)
    b.PrependUOffsetTRelativeSlot(1, vecend, 0)
    b.PrependInt16Slot(0, 55, 0)
    b.EndObject()
    self.assertBuilderEquals(b, [
        8, 0,  # vtable bytes
        12, 0,  # length of object
        6, 0,  # start of value 0 from end of vtable
        8, 0,  # start of value 1 from end of buffer
        8, 0, 0, 0,  # offset for start of vtable (int32)
        0, 0,  # padding
        55, 0,  # value 0
        4, 0, 0, 0,  # vector position from here
        2, 0, 0, 0,  # length of vector (uint32)
        0x78, 0x56,  # vector value 1
        0x34, 0x12,  # vector value 0
    ])
def test_vtable_with_1_struct_of_1_int8__1_int16__1_int32(self):
    """A struct is laid out inline in the table, fields padded to their
    natural alignment; the vtable stores the struct's start offset."""
    b = flatbuffers.Builder(0)
    b.StartObject(1)
    # Reserve worst-case space, then build the struct back-to-front.
    b.Prep(4+4+4, 0)
    b.PrependInt8(55)
    b.Pad(3)
    b.PrependInt16(0x1234)
    b.Pad(2)
    b.PrependInt32(0x12345678)
    structStart = b.Offset()
    b.PrependStructSlot(0, structStart, 0)
    b.EndObject()
    self.assertBuilderEquals(b, [
        6, 0,  # vtable bytes
        16, 0,  # end of object from here
        4, 0,  # start of struct from here
        6, 0, 0, 0,  # offset for start of vtable (int32)
        0x78, 0x56, 0x34, 0x12,  # value 2
        0, 0,  # padding
        0x34, 0x12,  # value 1
        0, 0, 0,  # padding
        55,  # value 0
    ])
def test_vtable_with_1_vector_of_2_struct_of_2_int8(self):
    """A vector of two 2-byte structs; struct fields and structs are both
    reversed in memory because each byte is prepended."""
    b = flatbuffers.Builder(0)
    b.StartVector(flatbuffers.number_types.Int8Flags.bytewidth*2, 2, 1)
    b.PrependInt8(33)
    b.PrependInt8(44)
    b.PrependInt8(55)
    b.PrependInt8(66)
    vecend = b.EndVector(2)
    b.StartObject(1)
    b.PrependUOffsetTRelativeSlot(0, vecend, 0)
    b.EndObject()
    self.assertBuilderEquals(b, [
        6, 0,  # vtable bytes
        8, 0,
        4, 0,  # offset of vector offset
        6, 0, 0, 0,  # offset for start of vtable (int32)
        4, 0, 0, 0,  # vector start offset
        2, 0, 0, 0,  # vector length
        66,  # vector value 1,1
        55,  # vector value 1,0
        44,  # vector value 0,1
        33,  # vector value 0,0
    ])
def test_table_with_some_elements(self):
    """Finish() prepends the root uoffset pointing at the table."""
    b = flatbuffers.Builder(0)
    b.StartObject(2)
    b.PrependInt8Slot(0, 33, 0)
    b.PrependInt16Slot(1, 66, 0)
    off = b.EndObject()
    b.Finish(off)
    self.assertBuilderEquals(b, [
        12, 0, 0, 0,  # root of table: points to vtable offset
        8, 0,  # vtable bytes
        8, 0,  # end of object from here
        7, 0,  # start of value 0
        4, 0,  # start of value 1
        8, 0, 0, 0,  # offset for start of vtable (int32)
        66, 0,  # value 1
        0,  # padding
        33,  # value 0
    ])
def test__one_unfinished_table_and_one_finished_table(self):
    """Two tables built back-to-back: the later-finished table sits at
    the front of the buffer and becomes the effective root."""
    b = flatbuffers.Builder(0)
    b.StartObject(2)
    b.PrependInt8Slot(0, 33, 0)
    b.PrependInt8Slot(1, 44, 0)
    off = b.EndObject()
    b.Finish(off)
    b.StartObject(3)
    b.PrependInt8Slot(0, 55, 0)
    b.PrependInt8Slot(1, 66, 0)
    b.PrependInt8Slot(2, 77, 0)
    off = b.EndObject()
    b.Finish(off)
    self.assertBuilderEquals(b, [
        16, 0, 0, 0,  # root of table: points to object
        0, 0,  # padding
        10, 0,  # vtable bytes
        8, 0,  # size of object
        7, 0,  # start of value 0
        6, 0,  # start of value 1
        5, 0,  # start of value 2
        10, 0, 0, 0,  # offset for start of vtable (int32)
        0,  # padding
        77,  # value 2
        66,  # value 1
        55,  # value 0
        12, 0, 0, 0,  # root of table: points to object
        8, 0,  # vtable bytes
        8, 0,  # size of object
        7, 0,  # start of value 0
        6, 0,  # start of value 1
        8, 0, 0, 0,  # offset for start of vtable (int32)
        0, 0,  # padding
        44,  # value 1
        33,  # value 0
    ])
def test_a_bunch_of_bools(self):
    """Eight bools fill the object with no padding (8 bytes + vtable
    offset = 12-byte object)."""
    b = flatbuffers.Builder(0)
    b.StartObject(8)
    b.PrependBoolSlot(0, True, False)
    b.PrependBoolSlot(1, True, False)
    b.PrependBoolSlot(2, True, False)
    b.PrependBoolSlot(3, True, False)
    b.PrependBoolSlot(4, True, False)
    b.PrependBoolSlot(5, True, False)
    b.PrependBoolSlot(6, True, False)
    b.PrependBoolSlot(7, True, False)
    off = b.EndObject()
    b.Finish(off)
    self.assertBuilderEquals(b, [
        24, 0, 0, 0,  # root of table: points to vtable offset
        20, 0,  # vtable bytes
        12, 0,  # size of object
        11, 0,  # start of value 0
        10, 0,  # start of value 1
        9, 0,  # start of value 2
        8, 0,  # start of value 3
        7, 0,  # start of value 4
        6, 0,  # start of value 5
        5, 0,  # start of value 6
        4, 0,  # start of value 7
        20, 0, 0, 0,  # vtable offset
        1,  # value 7
        1,  # value 6
        1,  # value 5
        1,  # value 4
        1,  # value 3
        1,  # value 2
        1,  # value 1
        1,  # value 0
    ])
def test_three_bools(self):
    """Three bools need one padding byte to align the 4-byte vtable
    offset, plus 2 bytes of root-pointer padding."""
    b = flatbuffers.Builder(0)
    b.StartObject(3)
    b.PrependBoolSlot(0, True, False)
    b.PrependBoolSlot(1, True, False)
    b.PrependBoolSlot(2, True, False)
    off = b.EndObject()
    b.Finish(off)
    self.assertBuilderEquals(b, [
        16, 0, 0, 0,  # root of table: points to vtable offset
        0, 0,  # padding
        10, 0,  # vtable bytes
        8, 0,  # size of object
        7, 0,  # start of value 0
        6, 0,  # start of value 1
        5, 0,  # start of value 2
        10, 0, 0, 0,  # vtable offset from here
        0,  # padding
        1,  # value 2
        1,  # value 1
        1,  # value 0
    ])
def test_some_floats(self):
    """A float32 slot; 1.0 is IEEE-754 bytes 00 00 80 3f little-endian."""
    b = flatbuffers.Builder(0)
    b.StartObject(1)
    b.PrependFloat32Slot(0, 1.0, 0.0)
    # Return value unused: the buffer is inspected directly below
    # without calling Finish().
    off = b.EndObject()
    self.assertBuilderEquals(b, [
        6, 0,  # vtable bytes
        8, 0,  # size of object
        4, 0,  # start of value 0
        6, 0, 0, 0,  # vtable offset
        0, 0, 128, 63,  # value 0
    ])
def make_monster_from_generated_code(sizePrefix = False, file_identifier=None):
    ''' Use generated code to build the example Monster.

    Child objects (strings, vectors, and the sub-Monster) are created
    *before* MonsterStart because flatbuffers tables cannot nest; their
    offsets are then attached to the parent table.  Returns the tuple
    (buffer_bytes, head_offset) of the finished buffer.
    '''
    b = flatbuffers.Builder(0)
    string = b.CreateString("MyMonster")
    test1 = b.CreateString("test1")
    test2 = b.CreateString("test2")
    fred = b.CreateString("Fred")

    # Inventory: bytes are prepended, so push 4..0 to store 0..4.
    MyGame.Example.Monster.MonsterStartInventoryVector(b, 5)
    b.PrependByte(4)
    b.PrependByte(3)
    b.PrependByte(2)
    b.PrependByte(1)
    b.PrependByte(0)
    inv = b.EndVector(5)

    # A minimal sub-Monster used as the union `Test` value.
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddName(b, fred)
    mon2 = MyGame.Example.Monster.MonsterEnd(b)

    # Vector of two inline Test structs.
    MyGame.Example.Monster.MonsterStartTest4Vector(b, 2)
    MyGame.Example.Test.CreateTest(b, 10, 20)
    MyGame.Example.Test.CreateTest(b, 30, 40)
    test4 = b.EndVector(2)

    MyGame.Example.Monster.MonsterStartTestarrayofstringVector(b, 2)
    b.PrependUOffsetTRelative(test2)
    b.PrependUOffsetTRelative(test1)
    testArrayOfString = b.EndVector(2)

    MyGame.Example.Monster.MonsterStartVectorOfLongsVector(b, 5)
    b.PrependInt64(100000000)
    b.PrependInt64(1000000)
    b.PrependInt64(10000)
    b.PrependInt64(100)
    b.PrependInt64(1)
    VectorOfLongs = b.EndVector(5)

    # Includes +/- float64 max to exercise extreme-value round-tripping.
    MyGame.Example.Monster.MonsterStartVectorOfDoublesVector(b, 3)
    b.PrependFloat64(1.7976931348623157e+308)
    b.PrependFloat64(0)
    b.PrependFloat64(-1.7976931348623157e+308)
    VectorOfDoubles = b.EndVector(3)

    # Assemble the root Monster from the child offsets created above.
    MyGame.Example.Monster.MonsterStart(b)
    pos = MyGame.Example.Vec3.CreateVec3(b, 1.0, 2.0, 3.0, 3.0, 2, 5, 6)
    MyGame.Example.Monster.MonsterAddPos(b, pos)
    MyGame.Example.Monster.MonsterAddHp(b, 80)
    MyGame.Example.Monster.MonsterAddName(b, string)
    MyGame.Example.Monster.MonsterAddInventory(b, inv)
    MyGame.Example.Monster.MonsterAddTestType(b, 1)
    MyGame.Example.Monster.MonsterAddTest(b, mon2)
    MyGame.Example.Monster.MonsterAddTest4(b, test4)
    MyGame.Example.Monster.MonsterAddTestarrayofstring(b, testArrayOfString)
    MyGame.Example.Monster.MonsterAddVectorOfLongs(b, VectorOfLongs)
    MyGame.Example.Monster.MonsterAddVectorOfDoubles(b, VectorOfDoubles)
    mon = MyGame.Example.Monster.MonsterEnd(b)

    if sizePrefix:
        b.FinishSizePrefixed(mon, file_identifier)
    else:
        b.Finish(mon, file_identifier)
    return b.Bytes, b.Head()
class TestAllCodePathsOfExampleSchema(unittest.TestCase):
    """Exercise every accessor generated from the example Monster schema:
    schema defaults on an empty Monster (built once in setUp) and
    explicitly-set fields built per-test."""

    def setUp(self, *args, **kwargs):
        super(TestAllCodePathsOfExampleSchema, self).setUp(*args, **kwargs)
        # A Monster with no fields set: every getter below must fall back
        # to the schema-declared default.
        b = flatbuffers.Builder(0)
        MyGame.Example.Monster.MonsterStart(b)
        gen_mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(gen_mon)
        self.mon = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                                   b.Head())

    def test_default_monster_pos(self):
        self.assertTrue(self.mon.Pos() is None)

    def test_nondefault_monster_mana(self):
        b = flatbuffers.Builder(0)
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddMana(b, 50)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)
        got_mon = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                                  b.Head())
        self.assertEqual(50, got_mon.Mana())

    def test_default_monster_hp(self):
        # Hp's schema default is 100, not 0.
        self.assertEqual(100, self.mon.Hp())

    def test_default_monster_name(self):
        self.assertEqual(None, self.mon.Name())

    def test_default_monster_inventory_item(self):
        self.assertEqual(0, self.mon.Inventory(0))

    def test_default_monster_inventory_length(self):
        self.assertEqual(0, self.mon.InventoryLength())

    def test_default_monster_color(self):
        self.assertEqual(MyGame.Example.Color.Color.Blue, self.mon.Color())

    def test_nondefault_monster_color(self):
        b = flatbuffers.Builder(0)
        color = MyGame.Example.Color.Color.Red
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddColor(b, color)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)
        mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                               b.Head())
        self.assertEqual(MyGame.Example.Color.Color.Red, mon2.Color())

    def test_default_monster_testtype(self):
        self.assertEqual(0, self.mon.TestType())

    def test_default_monster_test_field(self):
        self.assertEqual(None, self.mon.Test())

    def test_default_monster_test4_item(self):
        self.assertEqual(None, self.mon.Test4(0))

    def test_default_monster_test4_length(self):
        self.assertEqual(0, self.mon.Test4Length())

    def test_default_monster_testarrayofstring(self):
        self.assertEqual("", self.mon.Testarrayofstring(0))

    def test_default_monster_testarrayofstring_length(self):
        self.assertEqual(0, self.mon.TestarrayofstringLength())

    def test_default_monster_testarrayoftables(self):
        self.assertEqual(None, self.mon.Testarrayoftables(0))

    def test_nondefault_monster_testarrayoftables(self):
        b = flatbuffers.Builder(0)

        # make a child Monster within a vector of Monsters:
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddHp(b, 99)
        sub_monster = MyGame.Example.Monster.MonsterEnd(b)

        # build the vector:
        MyGame.Example.Monster.MonsterStartTestarrayoftablesVector(b, 1)
        b.PrependUOffsetTRelative(sub_monster)
        vec = b.EndVector(1)

        # make the parent monster and include the vector of Monster:
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddTestarrayoftables(b, vec)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)

        # inspect the resulting data:
        mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Output(), 0)
        self.assertEqual(99, mon2.Testarrayoftables(0).Hp())
        self.assertEqual(1, mon2.TestarrayoftablesLength())

    def test_default_monster_testarrayoftables_length(self):
        self.assertEqual(0, self.mon.TestarrayoftablesLength())

    def test_nondefault_monster_enemy(self):
        b = flatbuffers.Builder(0)

        # make an Enemy object:
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddHp(b, 88)
        enemy = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(enemy)

        # make the parent monster and include the vector of Monster:
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddEnemy(b, enemy)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)

        # inspect the resulting data:
        mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                               b.Head())
        self.assertEqual(88, mon2.Enemy().Hp())

    def test_default_monster_testnestedflatbuffer(self):
        self.assertEqual(0, self.mon.Testnestedflatbuffer(0))

    def test_default_monster_testnestedflatbuffer_length(self):
        self.assertEqual(0, self.mon.TestnestedflatbufferLength())

    def test_nondefault_monster_testnestedflatbuffer(self):
        b = flatbuffers.Builder(0)
        # Bytes are prepended, so push 4,2,0 to read back 0,2,4.
        MyGame.Example.Monster.MonsterStartTestnestedflatbufferVector(b, 3)
        b.PrependByte(4)
        b.PrependByte(2)
        b.PrependByte(0)
        sub_buf = b.EndVector(3)

        # make the parent monster and include the vector of Monster:
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddTestnestedflatbuffer(b, sub_buf)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)

        # inspect the resulting data:
        mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                               b.Head())
        self.assertEqual(3, mon2.TestnestedflatbufferLength())
        self.assertEqual(0, mon2.Testnestedflatbuffer(0))
        self.assertEqual(2, mon2.Testnestedflatbuffer(1))
        self.assertEqual(4, mon2.Testnestedflatbuffer(2))

        try:
            imp.find_module('numpy')
            # if numpy exists, then we should be able to get the
            # vector as a numpy array
            self.assertEqual([0, 2, 4], mon2.TestnestedflatbufferAsNumpy().tolist())
        except ImportError:
            assertRaises(self,
                         lambda: mon2.TestnestedflatbufferAsNumpy(),
                         NumpyRequiredForThisFeature)

    def test_nondefault_monster_testempty(self):
        b = flatbuffers.Builder(0)

        # make a Stat object:
        MyGame.Example.Stat.StatStart(b)
        MyGame.Example.Stat.StatAddVal(b, 123)
        my_stat = MyGame.Example.Stat.StatEnd(b)
        b.Finish(my_stat)

        # include the stat object in a monster:
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddTestempty(b, my_stat)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)

        # inspect the resulting data:
        mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                               b.Head())
        self.assertEqual(123, mon2.Testempty().Val())

    def test_default_monster_testbool(self):
        self.assertFalse(self.mon.Testbool())

    def test_nondefault_monster_testbool(self):
        b = flatbuffers.Builder(0)
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddTestbool(b, True)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)

        # inspect the resulting data:
        mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                               b.Head())
        self.assertTrue(mon2.Testbool())

    def test_default_monster_testhashes(self):
        self.assertEqual(0, self.mon.Testhashs32Fnv1())
        self.assertEqual(0, self.mon.Testhashu32Fnv1())
        self.assertEqual(0, self.mon.Testhashs64Fnv1())
        self.assertEqual(0, self.mon.Testhashu64Fnv1())
        self.assertEqual(0, self.mon.Testhashs32Fnv1a())
        self.assertEqual(0, self.mon.Testhashu32Fnv1a())
        self.assertEqual(0, self.mon.Testhashs64Fnv1a())
        self.assertEqual(0, self.mon.Testhashu64Fnv1a())

    def test_nondefault_monster_testhashes(self):
        b = flatbuffers.Builder(0)
        MyGame.Example.Monster.MonsterStart(b)
        MyGame.Example.Monster.MonsterAddTesthashs32Fnv1(b, 1)
        MyGame.Example.Monster.MonsterAddTesthashu32Fnv1(b, 2)
        MyGame.Example.Monster.MonsterAddTesthashs64Fnv1(b, 3)
        MyGame.Example.Monster.MonsterAddTesthashu64Fnv1(b, 4)
        MyGame.Example.Monster.MonsterAddTesthashs32Fnv1a(b, 5)
        MyGame.Example.Monster.MonsterAddTesthashu32Fnv1a(b, 6)
        MyGame.Example.Monster.MonsterAddTesthashs64Fnv1a(b, 7)
        MyGame.Example.Monster.MonsterAddTesthashu64Fnv1a(b, 8)
        mon = MyGame.Example.Monster.MonsterEnd(b)
        b.Finish(mon)

        # inspect the resulting data:
        mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                               b.Head())
        self.assertEqual(1, mon2.Testhashs32Fnv1())
        self.assertEqual(2, mon2.Testhashu32Fnv1())
        self.assertEqual(3, mon2.Testhashs64Fnv1())
        self.assertEqual(4, mon2.Testhashu64Fnv1())
        self.assertEqual(5, mon2.Testhashs32Fnv1a())
        self.assertEqual(6, mon2.Testhashu32Fnv1a())
        self.assertEqual(7, mon2.Testhashs64Fnv1a())
        self.assertEqual(8, mon2.Testhashu64Fnv1a())

    def test_getrootas_for_nonroot_table(self):
        b = flatbuffers.Builder(0)
        string = b.CreateString("MyStat")

        MyGame.Example.Stat.StatStart(b)
        MyGame.Example.Stat.StatAddId(b, string)
        MyGame.Example.Stat.StatAddVal(b, 12345678)
        MyGame.Example.Stat.StatAddCount(b, 12345)
        stat = MyGame.Example.Stat.StatEnd(b)
        b.Finish(stat)

        # Strings come back as bytes, hence b"MyStat".
        stat2 = MyGame.Example.Stat.Stat.GetRootAsStat(b.Bytes, b.Head())

        self.assertEqual(b"MyStat", stat2.Id())
        self.assertEqual(12345678, stat2.Val())
        self.assertEqual(12345, stat2.Count())
class TestAllCodePathsOfMonsterExtraSchema(unittest.TestCase):
    """Check MonsterExtra's non-finite float/double defaults (nan/inf)."""

    def setUp(self, *args, **kwargs):
        super(TestAllCodePathsOfMonsterExtraSchema, self).setUp(*args, **kwargs)
        b = flatbuffers.Builder(0)
        MyGame.MonsterExtra.MonsterExtraStart(b)
        gen_mon = MyGame.MonsterExtra.MonsterExtraEnd(b)
        b.Finish(gen_mon)
        self.mon = MyGame.MonsterExtra.MonsterExtra.GetRootAsMonsterExtra(b.Bytes, b.Head())

    def test_default_nan_inf(self):
        # nan never compares equal to itself, so isnan() is required
        # for the F1/D1 defaults.
        self.assertTrue(math.isnan(self.mon.F1()))
        self.assertEqual(self.mon.F2(), float("inf"))
        self.assertEqual(self.mon.F3(), float("-inf"))
        self.assertTrue(math.isnan(self.mon.D1()))
        self.assertEqual(self.mon.D2(), float("inf"))
        self.assertEqual(self.mon.D3(), float("-inf"))
class TestVtableDeduplication(unittest.TestCase):
    ''' TestVtableDeduplication verifies that vtables are deduplicated. '''

    def test_vtable_deduplication(self):
        # Three objects with the same field layout: the first writes a
        # vtable, the next two must reference it via a negative soffset.
        b = flatbuffers.Builder(0)

        b.StartObject(4)
        b.PrependByteSlot(0, 0, 0)
        b.PrependByteSlot(1, 11, 0)
        b.PrependByteSlot(2, 22, 0)
        b.PrependInt16Slot(3, 33, 0)
        obj0 = b.EndObject()

        b.StartObject(4)
        b.PrependByteSlot(0, 0, 0)
        b.PrependByteSlot(1, 44, 0)
        b.PrependByteSlot(2, 55, 0)
        b.PrependInt16Slot(3, 66, 0)
        obj1 = b.EndObject()

        b.StartObject(4)
        b.PrependByteSlot(0, 0, 0)
        b.PrependByteSlot(1, 77, 0)
        b.PrependByteSlot(2, 88, 0)
        b.PrependInt16Slot(3, 99, 0)
        obj2 = b.EndObject()

        got = b.Bytes[b.Head():]

        want = bytearray([
            240, 255, 255, 255,  # == -12. offset to dedupped vtable.
            99, 0,
            88,
            77,
            248, 255, 255, 255,  # == -8. offset to dedupped vtable.
            66, 0,
            55,
            44,
            12, 0,
            8, 0,
            0, 0,
            7, 0,
            6, 0,
            4, 0,
            12, 0, 0, 0,
            33, 0,
            22,
            11,
        ])

        self.assertEqual((len(want), want), (len(got), got))

        table0 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj0)
        table1 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj1)
        table2 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj2)

        def _checkTable(tab, voffsett_value, b, c, d):
            # Walk one table's vtable entries and field values.
            # vtable size
            got = tab.GetVOffsetTSlot(0, 0)
            self.assertEqual(12, got, 'case 0, 0')

            # object size
            got = tab.GetVOffsetTSlot(2, 0)
            self.assertEqual(8, got, 'case 2, 0')

            # default value
            got = tab.GetVOffsetTSlot(4, 0)
            self.assertEqual(voffsett_value, got, 'case 4, 0')

            got = tab.GetSlot(6, 0, N.Uint8Flags)
            self.assertEqual(b, got, 'case 6, 0')

            val = tab.GetSlot(8, 0, N.Uint8Flags)
            self.assertEqual(c, val, 'failed 8, 0')

            got = tab.GetSlot(10, 0, N.Uint8Flags)
            self.assertEqual(d, got, 'failed 10, 0')

        _checkTable(table0, 0, 11, 22, 33)
        _checkTable(table1, 0, 44, 55, 66)
        _checkTable(table2, 0, 77, 88, 99)
class TestExceptions(unittest.TestCase):
    """Verify the Builder raises its dedicated exception types for
    misuse: nesting violations, non-inline structs, unreachable offsets,
    and reading output before Finish()."""

    def test_object_is_nested_error(self):
        b = flatbuffers.Builder(0)
        b.StartObject(0)
        assertRaises(self, lambda: b.StartObject(0),
                     flatbuffers.builder.IsNestedError)

    def test_object_is_not_nested_error(self):
        b = flatbuffers.Builder(0)
        assertRaises(self, lambda: b.EndObject(),
                     flatbuffers.builder.IsNotNestedError)

    def test_struct_is_not_inline_error(self):
        b = flatbuffers.Builder(0)
        b.StartObject(0)
        assertRaises(self, lambda: b.PrependStructSlot(0, 1, 0),
                     flatbuffers.builder.StructIsNotInlineError)

    def test_unreachable_error(self):
        b = flatbuffers.Builder(0)
        assertRaises(self, lambda: b.PrependUOffsetTRelative(1),
                     flatbuffers.builder.OffsetArithmeticError)

    def test_create_string_is_nested_error(self):
        b = flatbuffers.Builder(0)
        b.StartObject(0)
        s = 'test1'
        assertRaises(self, lambda: b.CreateString(s),
                     flatbuffers.builder.IsNestedError)

    def test_create_byte_vector_is_nested_error(self):
        b = flatbuffers.Builder(0)
        b.StartObject(0)
        s = b'test1'
        assertRaises(self, lambda: b.CreateByteVector(s),
                     flatbuffers.builder.IsNestedError)

    def test_finished_bytes_error(self):
        b = flatbuffers.Builder(0)
        assertRaises(self, lambda: b.Output(),
                     flatbuffers.builder.BuilderNotFinishedError)
class TestFixedLengthArrays(unittest.TestCase):
    """Round-trip a struct containing fixed-length arrays (scalars,
    enums, and nested structs of arrays) through a table."""

    def test_fixed_length_array(self):
        builder = flatbuffers.Builder(0)

        # Field values for the ArrayStruct: scalar, 15-int array, scalar,
        # and two nested structs each holding an int pair, an enum, and
        # an enum pair.
        a = 0.5
        b = range(0, 15)
        c = 1
        d_a = [[1, 2], [3, 4]]
        d_b = [MyGame.Example.TestEnum.TestEnum.B, \
               MyGame.Example.TestEnum.TestEnum.C]
        d_c = [[MyGame.Example.TestEnum.TestEnum.A, \
                MyGame.Example.TestEnum.TestEnum.B], \
               [MyGame.Example.TestEnum.TestEnum.C, \
                MyGame.Example.TestEnum.TestEnum.B]]

        arrayOffset = MyGame.Example.ArrayStruct.CreateArrayStruct(builder, \
                                                                   a, b, c, d_a, d_b, d_c)

        # Create a table with the ArrayStruct.
        MyGame.Example.ArrayTable.ArrayTableStart(builder)
        MyGame.Example.ArrayTable.ArrayTableAddA(builder, arrayOffset)
        tableOffset = MyGame.Example.ArrayTable.ArrayTableEnd(builder)

        builder.Finish(tableOffset)

        buf = builder.Output()

        table = MyGame.Example.ArrayTable.ArrayTable.GetRootAsArrayTable(buf, 0)

        # Verify structure.
        nested = MyGame.Example.NestedStruct.NestedStruct()
        self.assertEqual(table.A().A(), 0.5)
        self.assertEqual(table.A().B(), \
                         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
        self.assertEqual(table.A().C(), 1)
        self.assertEqual(table.A().D(nested, 0).A(), [1, 2])
        self.assertEqual(table.A().D(nested, 1).A(), [3, 4])
        self.assertEqual(table.A().D(nested, 0).B(), \
                         MyGame.Example.TestEnum.TestEnum.B)
        self.assertEqual(table.A().D(nested, 1).B(), \
                         MyGame.Example.TestEnum.TestEnum.C)
        self.assertEqual(table.A().D(nested, 0).C(), \
                         [MyGame.Example.TestEnum.TestEnum.A, \
                          MyGame.Example.TestEnum.TestEnum.B])
        self.assertEqual(table.A().D(nested, 1).C(), \
                         [MyGame.Example.TestEnum.TestEnum.C, \
                          MyGame.Example.TestEnum.TestEnum.B])
def CheckAgainstGoldDataGo():
    """Compare Python-generated Monster bytes against the Go gold file.

    Returns True when 'monsterdata_go_wire.mon' exists, parses via
    CheckReadBuffer, and is byte-identical to the buffer built by
    make_monster_from_generated_code; prints a message and returns
    False otherwise.
    """
    try:
        gen_buf, gen_off = make_monster_from_generated_code()
        fn = 'monsterdata_go_wire.mon'
        if not os.path.exists(fn):
            print('Go-generated data does not exist, failed.')
            return False

        # `with` guarantees the handle is closed even if read() raises.
        # (The original avoided it for pre-2.6 compatibility, but this
        # file already refuses to run on anything older than 2.6.)
        with open(fn, 'rb') as f:
            go_wire_data = f.read()

        CheckReadBuffer(bytearray(go_wire_data), 0)
        if not bytearray(gen_buf[gen_off:]) == bytearray(go_wire_data):
            raise AssertionError('CheckAgainstGoldDataGo failed')
    except Exception:
        # Deliberately best-effort, but catch Exception rather than a
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        print('Failed to test against Go-generated test data.')
        return False
    print('Can read Go-generated test data, and Python generates bytewise identical data.')
    return True
def CheckAgainstGoldDataJava():
    """Verify the Java gold file 'monsterdata_java_wire.mon' can be read.

    Returns True when the file exists and parses via CheckReadBuffer;
    prints a message and returns False otherwise.  (Unlike the Go check,
    no bytewise comparison is performed here.)
    """
    try:
        gen_buf, gen_off = make_monster_from_generated_code()
        fn = 'monsterdata_java_wire.mon'
        if not os.path.exists(fn):
            print('Java-generated data does not exist, failed.')
            return False

        # `with` guarantees the handle is closed even if read() raises.
        with open(fn, 'rb') as f:
            java_wire_data = f.read()

        CheckReadBuffer(bytearray(java_wire_data), 0)
    except Exception:
        # Catch Exception, not bare `except:`, so KeyboardInterrupt and
        # SystemExit still propagate.
        print('Failed to read Java-generated test data.')
        return False
    print('Can read Java-generated test data.')
    return True
class LCG(object):
    """Park-Miller-style linear congruential generator.

    A tiny deterministic PRNG so benchmark/test sequences are identical
    across platforms and Python builds.
    http://en.wikipedia.org/wiki/Park%E2%80%93Miller_random_number_generator
    """

    __slots__ = ['n']

    InitialLCGSeed = 48271

    # Recurrence parameters: n' = (n * _MULTIPLIER) % _MODULUS.
    _MULTIPLIER = 279470273
    _MODULUS = 4294967291

    def __init__(self):
        self.Reset()

    def Reset(self):
        """Rewind the generator to its fixed initial seed."""
        self.n = self.InitialLCGSeed

    def Next(self):
        """Advance the state one step and return the new value,
        masked to 32 bits."""
        self.n = ((self.n * self._MULTIPLIER) % self._MODULUS) & 0xFFFFFFFF
        return self.n
def BenchmarkVtableDeduplication(count):
    '''
    BenchmarkVtableDeduplication measures the speed of vtable deduplication
    by creating `prePop` vtables, then populating `count` objects with a
    different single vtable.

    When count is large (as in long benchmarks), memory usage may be high.
    '''
    for prePop in (1, 10, 100, 1000):
        builder = flatbuffers.Builder(0)
        # Field count grows slowly with the number of pre-populated
        # layouts so layouts stay distinguishable.
        n = 1 + int(math.log(prePop, 1.5))

        # generate some layouts:
        layouts = set()
        r = list(compat_range(n))
        while len(layouts) < prePop:
            layouts.add(tuple(sorted(random.sample(r, int(max(1, n / 2))))))

        layouts = list(layouts)

        # pre-populate vtables:
        for layout in layouts:
            builder.StartObject(n)
            for j in layout:
                builder.PrependInt16Slot(j, j, 0)
            builder.EndObject()

        # benchmark deduplication of a new vtable:
        def f():
            layout = random.choice(layouts)
            builder.StartObject(n)
            for j in layout:
                builder.PrependInt16Slot(j, j, 0)
            builder.EndObject()

        duration = timeit.timeit(stmt=f, number=count)
        rate = float(count) / duration
        # `rate` is operations per second, so the unit is "/sec"; the
        # previous label printed it with a bare "sec" suffix, which
        # misreported a rate as a duration.
        print(('vtable deduplication rate (n=%d, vtables=%d): %.2f/sec' % (
            prePop,
            len(builder.vtables),
            rate))
        )
def BenchmarkCheckReadBuffer(count, buf, off):
    '''
    Measure flatbuffer read/traversal speed by running CheckReadBuffer
    over the gold data `count` times and reporting throughput.
    '''
    duration = timeit.timeit(stmt=lambda: CheckReadBuffer(buf, off),
                             number=count)
    per_second = count / float(duration)
    megabytes = float(len(buf) * count) / float(1024 * 1024)
    print(('traversed %d %d-byte flatbuffers in %.2fsec: %.2f/sec, %.2fMB/sec')
          % (count, len(buf), duration, per_second, megabytes / float(duration)))
def BenchmarkMakeMonsterFromGeneratedCode(count, length):
    '''
    Measure flatbuffer build speed by invoking
    make_monster_from_generated_code `count` times and reporting
    throughput for buffers of `length` bytes.
    '''
    duration = timeit.timeit(stmt=make_monster_from_generated_code,
                             number=count)
    per_second = count / float(duration)
    megabytes = float(length * count) / float(1024 * 1024)
    print(('built %d %d-byte flatbuffers in %.2fsec: %.2f/sec, %.2fMB/sec' % \
           (count, length, duration, per_second, megabytes / float(duration))))
def backward_compatible_run_tests(**kwargs):
    """Run the unittest suite in a way that works on Python 2.6+.

    Returns True when every test passed, False otherwise (including on
    interpreters older than 2.6, which are refused outright).
    """
    if PY_VERSION < (2, 6):
        sys.stderr.write("Python version less than 2.6 are not supported")
        sys.stderr.flush()
        return False

    # python2.6 has a reduced-functionality unittest.main function:
    if PY_VERSION == (2, 6):
        try:
            unittest.main(**kwargs)
        except SystemExit as e:
            # On 2.6 unittest.main always exits; a zero code means success.
            if not e.code == 0:
                return False
        return True

    # python2.7 and above let us not exit once unittest.main is run:
    kwargs['exit'] = False
    kwargs['verbosity'] = 0
    ret = unittest.main(**kwargs)
    if ret.result.errors or ret.result.failures:
        return False
    return True
def main():
    """Entry point: run the test suite, optional gold-data comparisons,
    then benchmarks.

    Expects exactly three positional arguments: iteration counts for the
    vtable-deduplication, buffer-traversal, and buffer-build benchmarks
    (0 disables a benchmark).  Exits non-zero on bad usage or test
    failure.
    """
    import os
    import sys
    if not len(sys.argv) == 4:
        # The adjacent string literals previously lacked separating
        # spaces, printing e.g. "count><benchmark" and "checkfor".
        sys.stderr.write('Usage: %s <benchmark vtable count> '
                         '<benchmark read count> <benchmark build count>\n'
                         % sys.argv[0])
        sys.stderr.write('    Provide COMPARE_GENERATED_TO_GO=1 to check '
                         'for bytewise comparison to Go data.\n')
        sys.stderr.write('    Provide COMPARE_GENERATED_TO_JAVA=1 to check '
                         'for bytewise comparison to Java data.\n')
        sys.stderr.flush()
        sys.exit(1)

    kwargs = dict(argv=sys.argv[:-3])

    # run tests, and run some language comparison checks if needed:
    success = backward_compatible_run_tests(**kwargs)
    # Use a string default so both sides of == share a type (the old
    # default of int 0 only worked because 0 != "1" by type mismatch).
    if success and os.environ.get('COMPARE_GENERATED_TO_GO', "0") == "1":
        success = success and CheckAgainstGoldDataGo()
    if success and os.environ.get('COMPARE_GENERATED_TO_JAVA', "0") == "1":
        success = success and CheckAgainstGoldDataJava()

    if not success:
        sys.stderr.write('Tests failed, skipping benchmarks.\n')
        sys.stderr.flush()
        sys.exit(1)

    # run benchmarks (if 0, they will be a noop):
    bench_vtable = int(sys.argv[1])
    bench_traverse = int(sys.argv[2])
    bench_build = int(sys.argv[3])
    if bench_vtable:
        BenchmarkVtableDeduplication(bench_vtable)
    if bench_traverse:
        buf, off = make_monster_from_generated_code()
        BenchmarkCheckReadBuffer(bench_traverse, buf, off)
    if bench_build:
        buf, off = make_monster_from_generated_code()
        BenchmarkMakeMonsterFromGeneratedCode(bench_build, len(buf))
# Script entry point: parse argv, run the suite, then benchmarks.
if __name__ == '__main__':
    main()
| apache-2.0 |
apanda/phantomjs-intercept | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/version_check.py | 128 | 1745 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys

# NOTE: this module is Python 2 only (`print >>` statement syntax); under
# Python 3 it fails to parse, which still (crudely) signals an unsupported
# interpreter.  The lexicographic comparison on sys.version works for the
# 2.6.x-2.7.x window enforced here, though string ordering would misplace a
# hypothetical "2.10" -- acceptable since no such CPython 2 release exists.
if sys.version < '2.6' or sys.version >= '2.8':
    print >> sys.stderr, "Unsupported Python version: WebKit only supports 2.6.x - 2.7.x, and you're running %s." % sys.version.split()[0]
    sys.exit(1)
| bsd-3-clause |
Mickey32111/pogom | pogom/pgoapi/protos/POGOProtos/Inventory/AppliedItem_pb2.py | 15 | 3946 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Inventory/AppliedItem.proto
import sys
# _b() normalizes the serialized descriptor literal below: identity on
# Python 3 (already bytes-friendly), latin-1 encode on Python 2.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory.Item import ItemId_pb2 as POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2
from POGOProtos.Inventory.Item import ItemType_pb2 as POGOProtos_dot_Inventory_dot_Item_dot_ItemType__pb2
# File descriptor carrying the serialized AppliedItem.proto schema and its
# dependencies on the ItemId / ItemType enum modules.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Inventory/AppliedItem.proto',
  package='POGOProtos.Inventory',
  syntax='proto3',
  serialized_pb=_b('\n&POGOProtos/Inventory/AppliedItem.proto\x12\x14POGOProtos.Inventory\x1a&POGOProtos/Inventory/Item/ItemId.proto\x1a(POGOProtos/Inventory/Item/ItemType.proto\"\xa0\x01\n\x0b\x41ppliedItem\x12\x32\n\x07item_id\x18\x01 \x01(\x0e\x32!.POGOProtos.Inventory.Item.ItemId\x12\x36\n\titem_type\x18\x02 \x01(\x0e\x32#.POGOProtos.Inventory.Item.ItemType\x12\x11\n\texpire_ms\x18\x03 \x01(\x03\x12\x12\n\napplied_ms\x18\x04 \x01(\x03\x62\x06proto3')
  ,
  dependencies=[POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2.DESCRIPTOR,POGOProtos_dot_Inventory_dot_Item_dot_ItemType__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Message descriptor for AppliedItem: two enum fields (item_id, item_type)
# plus two int64 millisecond timestamps (expire_ms, applied_ms).
_APPLIEDITEM = _descriptor.Descriptor(
  name='AppliedItem',
  full_name='POGOProtos.Inventory.AppliedItem',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='item_id', full_name='POGOProtos.Inventory.AppliedItem.item_id', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='item_type', full_name='POGOProtos.Inventory.AppliedItem.item_type', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='expire_ms', full_name='POGOProtos.Inventory.AppliedItem.expire_ms', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='applied_ms', full_name='POGOProtos.Inventory.AppliedItem.applied_ms', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=147,
  serialized_end=307,
)
# Late-bind the enum fields to the descriptors from the imported modules.
_APPLIEDITEM.fields_by_name['item_id'].enum_type = POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2._ITEMID
_APPLIEDITEM.fields_by_name['item_type'].enum_type = POGOProtos_dot_Inventory_dot_Item_dot_ItemType__pb2._ITEMTYPE
DESCRIPTOR.message_types_by_name['AppliedItem'] = _APPLIEDITEM
# Concrete message class, created through the protobuf reflection metaclass.
AppliedItem = _reflection.GeneratedProtocolMessageType('AppliedItem', (_message.Message,), dict(
  DESCRIPTOR = _APPLIEDITEM,
  __module__ = 'POGOProtos.Inventory.AppliedItem_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Inventory.AppliedItem)
  ))
_sym_db.RegisterMessage(AppliedItem)
# @@protoc_insertion_point(module_scope)
| mit |
mhnatiuk/phd_sociology_of_religion | scrapper/lib/python2.7/site-packages/scrapy/contrib/spiders/crawl.py | 12 | 3244 | """
This modules implements the CrawlSpider which is the recommended spider to use
for scraping typical web sites that requires crawling pages.
See documentation in docs/topics/spiders.rst
"""
import copy
from scrapy.http import Request, HtmlResponse
from scrapy.utils.spider import iterate_spider_output
from scrapy.spider import Spider
def identity(x):
    """Default ``process_request`` hook: return its argument unchanged."""
    return x
class Rule(object):
    """Associates a link extractor with the callback and follow policy a
    CrawlSpider applies to the pages that extractor matches."""

    def __init__(self, link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity):
        self.link_extractor = link_extractor
        self.callback = callback
        self.cb_kwargs = cb_kwargs or {}
        self.process_links = process_links
        self.process_request = process_request
        # Unless told otherwise, only keep following links from rules that
        # have no callback (pure navigation rules).
        self.follow = follow if follow is not None else not callback
class CrawlSpider(Spider):
    """Spider that crawls a site by following links extracted according to
    the :class:`Rule` objects listed in ``rules``."""
    rules = ()
    def __init__(self, *a, **kw):
        super(CrawlSpider, self).__init__(*a, **kw)
        self._compile_rules()
    def parse(self, response):
        # Entry point for every downloaded start-URL response: delegate to
        # parse_start_url and always follow links from start pages.
        return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True)
    def parse_start_url(self, response):
        # Override point: parse a start-URL response. Default: nothing.
        return []
    def process_results(self, response, results):
        # Override point: post-process a callback's results before yielding.
        return results
    def _requests_to_follow(self, response):
        # Yield follow-up Requests for each rule whose extractor matches
        # links in `response`; `seen` de-duplicates links across rules.
        if not isinstance(response, HtmlResponse):
            return
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                # Record which rule produced the request so the matching
                # callback/follow policy can be applied after download.
                r = Request(url=link.url, callback=self._response_downloaded)
                r.meta.update(rule=n, link_text=link.text)
                yield rule.process_request(r)
    def _response_downloaded(self, response):
        # Dispatch a downloaded response to the rule that requested it.
        rule = self._rules[response.meta['rule']]
        return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow)
    def _parse_response(self, response, callback, cb_kwargs, follow=True):
        # Run the user callback (if any) and re-yield its output, then
        # optionally continue crawling from this response.
        if callback:
            cb_res = callback(response, **cb_kwargs) or ()
            cb_res = self.process_results(response, cb_res)
            for requests_or_item in iterate_spider_output(cb_res):
                yield requests_or_item
        if follow and self._follow_links:
            for request_or_item in self._requests_to_follow(response):
                yield request_or_item
    def _compile_rules(self):
        # Resolve string callback names to bound methods on a shallow copy
        # of the class-level rules, so instances don't share mutated Rules.
        def get_method(method):
            if callable(method):
                return method
            elif isinstance(method, basestring):
                return getattr(self, method, None)
        self._rules = [copy.copy(r) for r in self.rules]
        for rule in self._rules:
            rule.callback = get_method(rule.callback)
            rule.process_links = get_method(rule.process_links)
            rule.process_request = get_method(rule.process_request)
    def set_crawler(self, crawler):
        super(CrawlSpider, self).set_crawler(crawler)
        self._follow_links = crawler.settings.getbool('CRAWLSPIDER_FOLLOW_LINKS', True)
| gpl-2.0 |
doct-rubens/predator-prey-beetle-wasp | tests/test_basic.py | 1 | 2731 | # -*- coding: utf-8 -*-
#
# Basic testing script that creates a sample universe and world
# with a predefined set of parameters and executes a simulation batch
import numpy as np
import matplotlib.pyplot as plt
from simul.universe import Universe
from simul.world import WonderfulWorld
from simul.control import SimulationControl
from media.plotter import Plotter
# reset the random generator seed (obtain same results for
# debugging purposes)
np.random.seed(42)
# beetle universe laws:
# mfr - male/female ratio - male probability
# lm, lv - lifespan (mean and var, normal distribution)
# amin, amax - initial population age (age, min and max)
# fr - fertility ratio
# om, ov - offspring mean and variance (normal distribution)
# aa - adult age (aa, equal or higher is adult)
# ee - egg age (ea, less or equal is egg)
# rd - random death (chance)
# fly universe laws parameters
fly_params = {'mfr': 0.3, 'lm': 24, 'lv': 4, 'amin': 17, 'amax': 17,
              'fr': 1.0, 'om': 28, 'ov': 19, 'aa': 15, 'ee': 9, 'rd': 0.05}
# moth universe laws parameters
moth_params = {'mfr': 0.5, 'lm': 70, 'lv': 2, 'amin': 0, 'amax': 65,
               'fr': 1.0, 'om': 60, 'ov': 20, 'aa': 63, 'ee': 10, 'rd': 0.04}
# other parameters:
# pc - predation coefficient
other_params = {'pc': 10.0}
# default costs:
costs = {'fly': 0.0027, 'moth': 0.005}
# initial number of flies and moths
nf = 0
nm = 2488
# number of simulation steps and number of simulations
steps = 200
n_simuls = 1
# image generation params
title = 'test simulation'
parent_path = 'output_images'
path = 'test_simulation'
# 'living','dead','male','female','randomly_killed','old_age_killed','parents','newborn','predation','caterpillars'
columns = ['moth-living', 'fly-living', 'fly-newborn', 'moth-female', 'moth-male']
# output csv file generation params
output_csv_dir = 'outputs'
output_csv_name = 'simul_results'
output_csv = 'all'  # can be 'all', 'mean' or 'none'
output_costs = 'mean'  # same as above, 'all', 'mean' or 'none'
# create the classes
# NOTE: unpacking dict .values() relies on insertion order (Python 3.7+),
# so the parameter dicts above must stay in the constructors' order.
my_plotter = Plotter(title, path, columns, n_simuls, parent_path=parent_path)
u = Universe(*fly_params.values(), *moth_params.values(), *other_params.values())
# BUG FIX: the original call was `WonderfulWorld(u, fil=fil, mil=mil)`, but
# neither `fil` nor `mil` is defined anywhere in this script, so the test
# crashed with NameError before running; rely on the class defaults instead.
w = WonderfulWorld(u)
s = SimulationControl(w, *costs.values(), plotter=my_plotter)
# run a simulation batch
df = s.simulation_batch(nf, nm, steps, n_simuls,
                        output_csv=output_csv,
                        output_costs=output_costs,
                        output_dir=output_csv_dir,
                        output_name=output_csv_name
                        )
df[['moth-living', 'fly-living']].plot()
plt.show()
| bsd-2-clause |
Cl3MM/metagoofil | hachoir_parser/game/zsnes.py | 95 | 13425 | """
ZSNES Save State Parser (v143 only currently)
Author: Jason Gorski
Creation date: 2006-09-15
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, StaticFieldSet,
UInt8, UInt16, UInt32,
String, PaddingBytes, Bytes, RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN
class ZSTHeader(StaticFieldSet):
    # Fixed-layout header at the very start of every ZST save state:
    # magic string + emulator timing/state bytes. Each tuple is
    # (field class, name[, description]) as consumed by StaticFieldSet.
    format = (
        (String, "zs_mesg", 26, "File header", {"charset": "ASCII"}),
        (UInt8, "zs_mesglen", "File header string len"),
        (UInt8, "zs_version", "Version minor #"),
        (UInt8, "curcyc", "cycles left in scanline"),
        (UInt16, "curypos", "current y position"),
        (UInt8, "cacheud", "update cache every ? frames"),
        (UInt8, "ccud", "current cache increment"),
        (UInt8, "intrset", "interrupt set"),
        (UInt8, "cycpl", "cycles per scanline"),
        (UInt8, "cycphb", "cycles per hblank"),
        (UInt8, "spcon", "SPC Enable (1=enabled)"),
        (UInt16, "stackand", "value to and stack to keep it from going to the wrong area"),
        (UInt16, "stackor", "value to or stack to keep it from going to the wrong area"),
    )
class ZSTcpu(StaticFieldSet):
    # 65816 CPU register snapshot of a ZST save state. Each tuple is
    # (field class, name[, description]) as consumed by StaticFieldSet.
    format = (
        (UInt16, "xat"),
        (UInt8, "xdbt"),
        (UInt8, "xpbt"),
        (UInt16, "xst"),
        (UInt16, "xdt"),
        (UInt16, "xxt"),
        (UInt16, "xyt"),
        (UInt8, "xp"),
        (UInt8, "xe"),
        (UInt16, "xpc"),
        (UInt8, "xirqb", "which bank the irqs start at"),
        (UInt8, "debugger", "Start with debugger (1: yes, 0: no)"),
        # BUG FIX: a missing comma fused the name and description into one
        # string ("CurtableCurrent table address"), producing a bogus field
        # name; they are separate items like every other 3-tuple here.
        (UInt32, "Curtable", "Current table address"),
        (UInt8, "curnmi", "if in NMI (1=yes)"),
        (UInt32, "cycpbl", "percentage left of CPU/SPC to run (3.58 = 175)"),
        (UInt32, "cycpblt", "percentage of CPU/SPC to run"),
    )
class ZSTppu(FieldSet):
    """PPU (video) state block of a ZST save state; fixed 3019-byte region."""
    static_size = 3019*8
    def createFields(self):
        yield UInt8(self, "sndrot", "rotates to use A,X or Y for sound skip")
        yield UInt8(self, "sndrot2", "rotates a random value for sound skip")
        yield UInt8(self, "INTEnab", "enables NMI(7)/VIRQ(5)/HIRQ(4)/JOY(0)")
        yield UInt8(self, "NMIEnab", "controlled in e65816 loop. Sets to 81h")
        yield UInt16(self, "VIRQLoc", "VIRQ Y location")
        yield UInt8(self, "vidbright", "screen brightness 0..15")
        yield UInt8(self, "previdbr", "previous screen brightness")
        yield UInt8(self, "forceblnk", "force blanking on/off ($80=on)")
        yield UInt32(self, "objptr", "pointer to object data in VRAM")
        yield UInt32(self, "objptrn", "pointer2 to object data in VRAM")
        yield UInt8(self, "objsize1", "1=8dot, 4=16dot, 16=32dot, 64=64dot")
        yield UInt8(self, "objsize2", "large object size")
        yield UInt8(self, "objmovs1", "number of bytes to move/paragraph")
        yield UInt16(self, "objadds1", "number of bytes to add/paragraph")
        yield UInt8(self, "objmovs2", "number of bytes to move/paragraph")
        yield UInt16(self, "objadds2", "number of bytes to add/paragraph")
        yield UInt16(self, "oamaddrt", "oam address")
        yield UInt16(self, "oamaddrs", "oam address at beginning of vblank")
        yield UInt8(self, "objhipr", "highest priority object #")
        yield UInt8(self, "bgmode", "graphics mode 0..7")
        yield UInt8(self, "bg3highst", "is 1 if background 3 has the highest priority")
        yield UInt8(self, "bgtilesz", "0=8x8, 1=16x16 bit0=bg1, bit1=bg2, etc.")
        yield UInt8(self, "mosaicon", "mosaic on, bit 0=bg1, bit1=bg2, etc.")
        yield UInt8(self, "mosaicsz", "mosaic size in pixels")
        yield UInt16(self, "bg1ptr", "pointer to background1")
        yield UInt16(self, "bg2ptr", "pointer to background2")
        yield UInt16(self, "bg3ptr", "pointer to background3")
        yield UInt16(self, "bg4ptr", "pointer to background4")
        yield UInt16(self, "bg1ptrb", "pointer to background1")
        yield UInt16(self, "bg2ptrb", "pointer to background2")
        yield UInt16(self, "bg3ptrb", "pointer to background3")
        yield UInt16(self, "bg4ptrb", "pointer to background4")
        yield UInt16(self, "bg1ptrc", "pointer to background1")
        yield UInt16(self, "bg2ptrc", "pointer to background2")
        yield UInt16(self, "bg3ptrc", "pointer to background3")
        yield UInt16(self, "bg4ptrc", "pointer to background4")
        yield UInt16(self, "bg1ptrd", "pointer to background1")
        yield UInt16(self, "bg2ptrd", "pointer to background2")
        yield UInt16(self, "bg3ptrd", "pointer to background3")
        yield UInt16(self, "bg4ptrd", "pointer to background4")
        yield UInt8(self, "bg1scsize", "bg #1 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt8(self, "bg2scsize", "bg #2 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt8(self, "bg3scsize", "bg #3 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt8(self, "bg4scsize", "bg #4 screen size (0=1x1,1=1x2,2=2x1,3=2x2)")
        yield UInt16(self, "bg1objptr", "pointer to tiles in background1")
        yield UInt16(self, "bg2objptr", "pointer to tiles in background2")
        yield UInt16(self, "bg3objptr", "pointer to tiles in background3")
        yield UInt16(self, "bg4objptr", "pointer to tiles in background4")
        yield UInt16(self, "bg1scrolx", "background 1 x position")
        yield UInt16(self, "bg2scrolx", "background 2 x position")
        yield UInt16(self, "bg3scrolx", "background 3 x position")
        yield UInt16(self, "bg4scrolx", "background 4 x position")
        yield UInt16(self, "bg1sx", "Temporary Variable for Debugging purposes")
        yield UInt16(self, "bg1scroly", "background 1 y position")
        yield UInt16(self, "bg2scroly", "background 2 y position")
        yield UInt16(self, "bg3scroly", "background 3 y position")
        yield UInt16(self, "bg4scroly", "background 4 y position")
        yield UInt16(self, "addrincr", "vram increment (2,64,128,256)")
        yield UInt8(self, "vramincr", "0 = increment at 2118/2138, 1 = 2119,213A")
        yield UInt8(self, "vramread", "0 = address set, 1 = already read once")
        yield UInt32(self, "vramaddr", "vram address")
        yield UInt16(self, "cgaddr", "cg (palette)")
        yield UInt8(self, "cgmod", "if cgram is modified or not")
        yield UInt16(self, "scrnon", "main & sub screen on")
        yield UInt8(self, "scrndist", "which background is disabled")
        yield UInt16(self, "resolutn", "screen resolution")
        yield UInt8(self, "multa", "multiplier A")
        yield UInt16(self, "diva", "divisor C")
        yield UInt16(self, "divres", "quotent of divc/divb")
        yield UInt16(self, "multres", "result of multa * multb/remainder of divc/divb")
        yield UInt16(self, "latchx", "latched x value")
        yield UInt16(self, "latchy", "latched y value")
        yield UInt8(self, "latchxr", "low or high byte read for x value")
        yield UInt8(self, "latchyr", "low or high byte read for y value")
        yield UInt8(self, "frskipper", "used to control frame skipping")
        yield UInt8(self, "winl1", "window 1 left position")
        yield UInt8(self, "winr1", "window 1 right position")
        yield UInt8(self, "winl2", "window 2 left position")
        yield UInt8(self, "winr2", "window 2 right position")
        yield UInt8(self, "winbg1en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG1")
        yield UInt8(self, "winbg2en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG2")
        yield UInt8(self, "winbg3en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG3")
        yield UInt8(self, "winbg4en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG4")
        yield UInt8(self, "winobjen", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on sprites")
        yield UInt8(self, "wincolen", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on backarea")
        yield UInt8(self, "winlogica", "Window logic type for BG1 to 4")
        yield UInt8(self, "winlogicb", "Window logic type for Sprites and Backarea")
        yield UInt8(self, "winenabm", "Window logic enable for main screen")
        yield UInt8(self, "winenabs", "Window logic enable for sub sceen")
        yield UInt8(self, "mode7set", "mode 7 settings")
        yield UInt16(self, "mode7A", "A value for Mode 7")
        yield UInt16(self, "mode7B", "B value for Mode 7")
        yield UInt16(self, "mode7C", "C value for Mode 7")
        yield UInt16(self, "mode7D", "D value for Mode 7")
        yield UInt16(self, "mode7X0", "Center X for Mode 7")
        yield UInt16(self, "mode7Y0", "Center Y for Mode 7")
        yield UInt8(self, "JoyAPos", "Old-Style Joystick Read Position for Joy 1 & 3")
        yield UInt8(self, "JoyBPos", "Old-Style Joystick Read Position for Joy 2 & 4")
        yield UInt32(self, "compmult", "Complement Multiplication for Mode 7")
        yield UInt8(self, "joyalt", "temporary joystick alternation")
        yield UInt32(self, "wramrwadr", "continuous read/write to wram address")
        yield RawBytes(self, "dmadata", 129, "dma data (written from ports 43xx)")
        yield UInt8(self, "irqon", "if IRQ has been called (80h) or not (0)")
        yield UInt8(self, "nexthdma", "HDMA data to execute once vblank ends")
        yield UInt8(self, "curhdma", "Currently executed hdma")
        yield RawBytes(self, "hdmadata", 152, "4 dword register addresses, # bytes to transfer/line, address increment (word)")
        yield UInt8(self, "hdmatype", "if first time executing hdma or not")
        yield UInt8(self, "coladdr", "red value of color to add")
        yield UInt8(self, "coladdg", "green value of color to add")
        yield UInt8(self, "coladdb", "blue value of color to add")
        yield UInt8(self, "colnull", "keep this 0 (when accessing colors by dword)")
        yield UInt8(self, "scaddset", "screen/fixed color addition settings")
        yield UInt8(self, "scaddtype", "which screen to add/sub")
        yield UInt8(self, "Voice0Disabl2", "Disable Voice 0")
        yield UInt8(self, "Voice1Disabl2", "Disable Voice 1")
        yield UInt8(self, "Voice2Disabl2", "Disable Voice 2")
        yield UInt8(self, "Voice3Disabl2", "Disable Voice 3")
        yield UInt8(self, "Voice4Disabl2", "Disable Voice 4")
        yield UInt8(self, "Voice5Disabl2", "Disable Voice 5")
        yield UInt8(self, "Voice6Disabl2", "Disable Voice 6")
        yield UInt8(self, "Voice7Disabl2", "Disable Voice 7")
        yield RawBytes(self, "oamram", 1024, "OAMRAM (544 bytes)")
        yield RawBytes(self, "cgram", 512, "CGRAM")
        yield RawBytes(self, "pcgram", 512, "Previous CGRAM")
        yield UInt8(self, "vraminctype")
        yield UInt8(self, "vramincby8on", "if increment by 8 is on")
        yield UInt8(self, "vramincby8left", "how many left")
        yield UInt8(self, "vramincby8totl", "how many in total (32,64,128)")
        yield UInt8(self, "vramincby8rowl", "how many left in that row (start at 8)")
        yield UInt16(self, "vramincby8ptri", "increment by how many when rowl = 0")
        yield UInt8(self, "nexthprior")
        yield UInt8(self, "doirqnext")
        yield UInt16(self, "vramincby8var")
        yield UInt8(self, "screstype")
        yield UInt8(self, "extlatch")
        yield UInt8(self, "cfield")
        yield UInt8(self, "interlval")
        # BUG FIX: a missing comma fused the field name and description into
        # one string ("HIRQLoc HIRQ X"), creating a field whose name contains
        # a space; split them into separate arguments like every other field.
        yield UInt16(self, "HIRQLoc", "HIRQ X")
        # NEWer ZST format
        yield UInt8(self, "KeyOnStA")
        yield UInt8(self, "KeyOnStB")
        yield UInt8(self, "SDD1BankA")
        yield UInt8(self, "SDD1BankB")
        yield UInt8(self, "SDD1BankC")
        yield UInt8(self, "SDD1BankD")
        yield UInt8(self, "vramread2")
        yield UInt8(self, "nosprincr")
        yield UInt16(self, "poamaddrs")
        yield UInt8(self, "ioportval")
        yield UInt8(self, "iohvlatch")
        yield UInt8(self, "ppustatus")
        yield PaddingBytes(self, "tempdat", 477, "Reserved/Unused")
class ZSNESFile(Parser):
    """Hachoir parser for ZSNES save state files (format version 143 only)."""
    PARSER_TAGS = {
        "id": "zsnes",
        "category": "game",
        "description": "ZSNES Save State File (only version 143)",
        "min_size": 3091*8,
        "file_ext": ("zst", "zs1", "zs2", "zs3", "zs4", "zs5", "zs6",
                     "zs7", "zs8", "zs9")
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        """Check the magic string and version byte; return True or an error message."""
        temp = self.stream.readBytes(0, 28)
        if temp[0:26] != "ZSNES Save State File V143":
            return "Wrong header"
        if ord(temp[27:28]) != 143:  # extra...
            # BUG FIX: the original interpolated temp[27:1] (an empty slice)
            # into %d, which raised TypeError on this error path instead of
            # reporting the offending version byte.
            return "Wrong save version %d <> 143" % ord(temp[27:28])
        return True
    def seek(self, offset):
        # Generator helper: yield the padding field (if any) created by
        # seeking to an absolute byte offset.
        padding = self.seekByte(offset, relative=False)
        if padding is not None:
            yield padding
    def createFields(self):
        yield ZSTHeader(self, "header", "ZST header")  # Offset: 0
        yield ZSTcpu(self, "cpu", "ZST cpu registers")  # 41
        yield ZSTppu(self, "ppu", "ZST CPU registers")  # 72
        yield RawBytes(self, "wram7E", 65536)  # 3091
        yield RawBytes(self, "wram7F", 65536)  # 68627
        yield RawBytes(self, "vram", 65536)  # 134163
        # TODO: Interpret extra on-cart chip data found at/beyond... 199699
        # TODO: Interpret Thumbnail/Screenshot data found at 275291
        # 64*56*2(16bit colors) = 7168
        padding = self.seekByte(275291, relative=False)
        if padding is not None:
            yield padding
        yield Bytes(self, "thumbnail", 7168, "Thumbnail of playing game in some sort of raw 64x56x16-bit RGB mode?")
| gpl-2.0 |
karthik-sethuraman/ONFOpenTransport | RI/flask_server/tapi_server/models/tapi_odu_mapping_type.py | 4 | 1101 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiOduMappingType(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    """
    allowed enum values
    """
    # Enumeration constants for the ODU mapping type; the model carries no
    # instance data beyond one of these string values.
    AMP = "AMP"
    BMP = "BMP"
    GFP_F = "GFP-F"
    GMP = "GMP"
    TTP_GFP_BMP = "TTP_GFP_BMP"
    NULL = "NULL"
    def __init__(self): # noqa: E501
        """TapiOduMappingType - a model defined in OpenAPI
        """
        # No declared fields: both mappings are intentionally empty because
        # this model is a bare enum wrapper.
        self.openapi_types = {
        }
        self.attribute_map = {
        }
    @classmethod
    def from_dict(cls, dikt) -> 'TapiOduMappingType':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The tapi.odu.MappingType of this TapiOduMappingType.  # noqa: E501
        :rtype: TapiOduMappingType
        """
        return util.deserialize_model(dikt, cls)
denys-duchier/django | tests/template_tests/filter_tests/test_cut.py | 521 | 2269 | from django.template.defaultfilters import cut
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class CutTests(SimpleTestCase):
    # Template-level tests for the |cut filter: each pair of templates
    # renders the same input with autoescape off and on, for both a plain
    # string and a mark_safe() string, and checks the escaped output.
    @setup({'cut01': '{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}'})
    def test_cut01(self):
        output = self.engine.render_to_string('cut01', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "&y &y")
    @setup({'cut02': '{{ a|cut:"x" }} {{ b|cut:"x" }}'})
    def test_cut02(self):
        output = self.engine.render_to_string('cut02', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "&y &y")
    @setup({'cut03': '{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}'})
    def test_cut03(self):
        output = self.engine.render_to_string('cut03', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "xy xamp;y")
    @setup({'cut04': '{{ a|cut:"&" }} {{ b|cut:"&" }}'})
    def test_cut04(self):
        output = self.engine.render_to_string('cut04', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "xy xamp;y")
    # Passing ';' to cut can break existing HTML entities, so those strings
    # are auto-escaped.
    @setup({'cut05': '{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}'})
    def test_cut05(self):
        output = self.engine.render_to_string('cut05', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "x&y x&y")
    @setup({'cut06': '{{ a|cut:";" }} {{ b|cut:";" }}'})
    def test_cut06(self):
        output = self.engine.render_to_string('cut06', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "x&y x&ampy")
class FunctionTests(SimpleTestCase):
    # Direct unit tests for the cut() filter function: it removes every
    # occurrence of its argument from the (stringified) value.
    def test_character(self):
        self.assertEqual(cut('a string to be mangled', 'a'), ' string to be mngled')
    def test_characters(self):
        self.assertEqual(cut('a string to be mangled', 'ng'), 'a stri to be maled')
    def test_non_matching_string(self):
        self.assertEqual(cut('a string to be mangled', 'strings'), 'a string to be mangled')
    def test_non_string_input(self):
        # Non-string values are coerced to str before cutting.
        self.assertEqual(cut(123, '2'), '13')
| bsd-3-clause |
slightstone/SickRage | lib/unidecode/x094.py | 252 | 4661 | data = (
# Pinyin transliterations for Unicode code points U+9400..U+94FF (CJK);
# the tuple index equals the low byte of the code point. '[?]' marks
# characters with no known transliteration. Machine-generated data.
'Kui ', # 0x00
'Si ', # 0x01
'Liu ', # 0x02
'Nao ', # 0x03
'Heng ', # 0x04
'Pie ', # 0x05
'Sui ', # 0x06
'Fan ', # 0x07
'Qiao ', # 0x08
'Quan ', # 0x09
'Yang ', # 0x0a
'Tang ', # 0x0b
'Xiang ', # 0x0c
'Jue ', # 0x0d
'Jiao ', # 0x0e
'Zun ', # 0x0f
'Liao ', # 0x10
'Jie ', # 0x11
'Lao ', # 0x12
'Dui ', # 0x13
'Tan ', # 0x14
'Zan ', # 0x15
'Ji ', # 0x16
'Jian ', # 0x17
'Zhong ', # 0x18
'Deng ', # 0x19
'Ya ', # 0x1a
'Ying ', # 0x1b
'Dui ', # 0x1c
'Jue ', # 0x1d
'Nou ', # 0x1e
'Ti ', # 0x1f
'Pu ', # 0x20
'Tie ', # 0x21
'[?] ', # 0x22
'[?] ', # 0x23
'Ding ', # 0x24
'Shan ', # 0x25
'Kai ', # 0x26
'Jian ', # 0x27
'Fei ', # 0x28
'Sui ', # 0x29
'Lu ', # 0x2a
'Juan ', # 0x2b
'Hui ', # 0x2c
'Yu ', # 0x2d
'Lian ', # 0x2e
'Zhuo ', # 0x2f
'Qiao ', # 0x30
'Qian ', # 0x31
'Zhuo ', # 0x32
'Lei ', # 0x33
'Bi ', # 0x34
'Tie ', # 0x35
'Huan ', # 0x36
'Ye ', # 0x37
'Duo ', # 0x38
'Guo ', # 0x39
'Dang ', # 0x3a
'Ju ', # 0x3b
'Fen ', # 0x3c
'Da ', # 0x3d
'Bei ', # 0x3e
'Yi ', # 0x3f
'Ai ', # 0x40
'Zong ', # 0x41
'Xun ', # 0x42
'Diao ', # 0x43
'Zhu ', # 0x44
'Heng ', # 0x45
'Zhui ', # 0x46
'Ji ', # 0x47
'Nie ', # 0x48
'Ta ', # 0x49
'Huo ', # 0x4a
'Qing ', # 0x4b
'Bin ', # 0x4c
'Ying ', # 0x4d
'Kui ', # 0x4e
'Ning ', # 0x4f
'Xu ', # 0x50
'Jian ', # 0x51
'Jian ', # 0x52
'Yari ', # 0x53
'Cha ', # 0x54
'Zhi ', # 0x55
'Mie ', # 0x56
'Li ', # 0x57
'Lei ', # 0x58
'Ji ', # 0x59
'Zuan ', # 0x5a
'Kuang ', # 0x5b
'Shang ', # 0x5c
'Peng ', # 0x5d
'La ', # 0x5e
'Du ', # 0x5f
'Shuo ', # 0x60
'Chuo ', # 0x61
'Lu ', # 0x62
'Biao ', # 0x63
'Bao ', # 0x64
'Lu ', # 0x65
'[?] ', # 0x66
'[?] ', # 0x67
'Long ', # 0x68
'E ', # 0x69
'Lu ', # 0x6a
'Xin ', # 0x6b
'Jian ', # 0x6c
'Lan ', # 0x6d
'Bo ', # 0x6e
'Jian ', # 0x6f
'Yao ', # 0x70
'Chan ', # 0x71
'Xiang ', # 0x72
'Jian ', # 0x73
'Xi ', # 0x74
'Guan ', # 0x75
'Cang ', # 0x76
'Nie ', # 0x77
'Lei ', # 0x78
'Cuan ', # 0x79
'Qu ', # 0x7a
'Pan ', # 0x7b
'Luo ', # 0x7c
'Zuan ', # 0x7d
'Luan ', # 0x7e
'Zao ', # 0x7f
'Nie ', # 0x80
'Jue ', # 0x81
'Tang ', # 0x82
'Shu ', # 0x83
'Lan ', # 0x84
'Jin ', # 0x85
'Qiu ', # 0x86
'Yi ', # 0x87
'Zhen ', # 0x88
'Ding ', # 0x89
'Zhao ', # 0x8a
'Po ', # 0x8b
'Diao ', # 0x8c
'Tu ', # 0x8d
'Qian ', # 0x8e
'Chuan ', # 0x8f
'Shan ', # 0x90
'Ji ', # 0x91
'Fan ', # 0x92
'Diao ', # 0x93
'Men ', # 0x94
'Nu ', # 0x95
'Xi ', # 0x96
'Chai ', # 0x97
'Xing ', # 0x98
'Gai ', # 0x99
'Bu ', # 0x9a
'Tai ', # 0x9b
'Ju ', # 0x9c
'Dun ', # 0x9d
'Chao ', # 0x9e
'Zhong ', # 0x9f
'Na ', # 0xa0
'Bei ', # 0xa1
'Gang ', # 0xa2
'Ban ', # 0xa3
'Qian ', # 0xa4
'Yao ', # 0xa5
'Qin ', # 0xa6
'Jun ', # 0xa7
'Wu ', # 0xa8
'Gou ', # 0xa9
'Kang ', # 0xaa
'Fang ', # 0xab
'Huo ', # 0xac
'Tou ', # 0xad
'Niu ', # 0xae
'Ba ', # 0xaf
'Yu ', # 0xb0
'Qian ', # 0xb1
'Zheng ', # 0xb2
'Qian ', # 0xb3
'Gu ', # 0xb4
'Bo ', # 0xb5
'E ', # 0xb6
'Po ', # 0xb7
'Bu ', # 0xb8
'Ba ', # 0xb9
'Yue ', # 0xba
'Zuan ', # 0xbb
'Mu ', # 0xbc
'Dan ', # 0xbd
'Jia ', # 0xbe
'Dian ', # 0xbf
'You ', # 0xc0
'Tie ', # 0xc1
'Bo ', # 0xc2
'Ling ', # 0xc3
'Shuo ', # 0xc4
'Qian ', # 0xc5
'Liu ', # 0xc6
'Bao ', # 0xc7
'Shi ', # 0xc8
'Xuan ', # 0xc9
'She ', # 0xca
'Bi ', # 0xcb
'Ni ', # 0xcc
'Pi ', # 0xcd
'Duo ', # 0xce
'Xing ', # 0xcf
'Kao ', # 0xd0
'Lao ', # 0xd1
'Er ', # 0xd2
'Mang ', # 0xd3
'Ya ', # 0xd4
'You ', # 0xd5
'Cheng ', # 0xd6
'Jia ', # 0xd7
'Ye ', # 0xd8
'Nao ', # 0xd9
'Zhi ', # 0xda
'Dang ', # 0xdb
'Tong ', # 0xdc
'Lu ', # 0xdd
'Diao ', # 0xde
'Yin ', # 0xdf
'Kai ', # 0xe0
'Zha ', # 0xe1
'Zhu ', # 0xe2
'Xian ', # 0xe3
'Ting ', # 0xe4
'Diu ', # 0xe5
'Xian ', # 0xe6
'Hua ', # 0xe7
'Quan ', # 0xe8
'Sha ', # 0xe9
'Jia ', # 0xea
'Yao ', # 0xeb
'Ge ', # 0xec
'Ming ', # 0xed
'Zheng ', # 0xee
'Se ', # 0xef
'Jiao ', # 0xf0
'Yi ', # 0xf1
'Chan ', # 0xf2
'Chong ', # 0xf3
'Tang ', # 0xf4
'An ', # 0xf5
'Yin ', # 0xf6
'Ru ', # 0xf7
'Zhu ', # 0xf8
'Lao ', # 0xf9
'Pu ', # 0xfa
'Wu ', # 0xfb
'Lai ', # 0xfc
'Te ', # 0xfd
'Lian ', # 0xfe
'Keng ', # 0xff
)
| gpl-3.0 |
crwilcox/PyGithub | github/tests/Label.py | 39 | 2611 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class Label(Framework.TestCase):
    # Replay-based integration tests for github.Label: attribute access,
    # edit() and delete() against the recorded API responses provided by
    # the Framework test harness.
    def setUp(self):
        Framework.TestCase.setUp(self)
        self.label = self.g.get_user().get_repo("PyGithub").get_label("Bug")
    def testAttributes(self):
        self.assertEqual(self.label.color, "e10c02")
        self.assertEqual(self.label.name, "Bug")
        self.assertEqual(self.label.url, "https://api.github.com/repos/jacquev6/PyGithub/labels/Bug")
    def testEdit(self):
        # edit() mutates the local object in place from the API response.
        self.label.edit("LabelEditedByPyGithub", "0000ff")
        self.assertEqual(self.label.color, "0000ff")
        self.assertEqual(self.label.name, "LabelEditedByPyGithub")
        self.assertEqual(self.label.url, "https://api.github.com/repos/jacquev6/PyGithub/labels/LabelEditedByPyGithub")
    def testDelete(self):
        self.label.delete()
| gpl-3.0 |
Ph0enixxx/jieba | test/test_tokenize_no_hmm.py | 65 | 5544 | #encoding=utf-8
from __future__ import print_function,unicode_literals
import sys
sys.path.append("../")
import jieba
g_mode="default"
def cuttest(test_sent):
    """Tokenize *test_sent* with jieba in the current module-global mode
    (``g_mode``), HMM disabled, and print each token with its span."""
    tokens = jieba.tokenize(test_sent, mode=g_mode, HMM=False)
    for tk in tokens:
        print("word %s\t\t start: %d \t\t end:%d" % (tk[0], tk[1], tk[2]))
if __name__ == "__main__":
    # Exercise the tokenizer in both modes over a corpus of tricky inputs:
    # ambiguous segmentations, mixed scripts, punctuation, and empty strings.
    for m in ("default","search"):
        g_mode = m  # module global read by cuttest()
        cuttest("这是一个伸手不见五指的黑夜。我叫孙悟空,我爱北京,我爱Python和C++。")
        cuttest("我不喜欢日本和服。")
        cuttest("雷猴回归人间。")
        cuttest("工信处女干事每月经过下属科室都要亲口交代24口交换机等技术性器件的安装工作")
        cuttest("我需要廉租房")
        cuttest("永和服装饰品有限公司")
        cuttest("我爱北京天安门")
        cuttest("abc")
        cuttest("隐马尔可夫")
        cuttest("雷猴是个好网站")
        cuttest("“Microsoft”一词由“MICROcomputer(微型计算机)”和“SOFTware(软件)”两部分组成")
        cuttest("草泥马和欺实马是今年的流行词汇")
        cuttest("伊藤洋华堂总府店")
        cuttest("中国科学院计算技术研究所")
        cuttest("罗密欧与朱丽叶")
        cuttest("我购买了道具和服装")
        cuttest("PS: 我觉得开源有一个好处,就是能够敦促自己不断改进,避免敞帚自珍")
        cuttest("湖北省石首市")
        cuttest("湖北省十堰市")
        cuttest("总经理完成了这件事情")
        cuttest("电脑修好了")
        cuttest("做好了这件事情就一了百了了")
        cuttest("人们审美的观点是不同的")
        cuttest("我们买了一个美的空调")
        cuttest("线程初始化时我们要注意")
        cuttest("一个分子是由好多原子组织成的")
        cuttest("祝你马到功成")
        cuttest("他掉进了无底洞里")
        cuttest("中国的首都是北京")
        cuttest("孙君意")
        cuttest("外交部发言人马朝旭")
        cuttest("领导人会议和第四届东亚峰会")
        cuttest("在过去的这五年")
        cuttest("还需要很长的路要走")
        cuttest("60周年首都阅兵")
        cuttest("你好人们审美的观点是不同的")
        cuttest("买水果然后来世博园")
        cuttest("买水果然后去世博园")
        cuttest("但是后来我才知道你是对的")
        cuttest("存在即合理")
        cuttest("的的的的的在的的的的就以和和和")
        cuttest("I love你,不以为耻,反以为rong")
        cuttest("因")
        cuttest("")
        cuttest("hello你好人们审美的观点是不同的")
        cuttest("很好但主要是基于网页形式")
        cuttest("hello你好人们审美的观点是不同的")
        cuttest("为什么我不能拥有想要的生活")
        cuttest("后来我才")
        cuttest("此次来中国是为了")
        cuttest("使用了它就可以解决一些问题")
        cuttest(",使用了它就可以解决一些问题")
        cuttest("其实使用了它就可以解决一些问题")
        cuttest("好人使用了它就可以解决一些问题")
        cuttest("是因为和国家")
        cuttest("老年搜索还支持")
        cuttest("干脆就把那部蒙人的闲法给废了拉倒!RT @laoshipukong : 27日,全国人大常委会第三次审议侵权责任法草案,删除了有关医疗损害责任“举证倒置”的规定。在医患纠纷中本已处于弱势地位的消费者由此将陷入万劫不复的境地。 ")
        cuttest("大")
        cuttest("")
        cuttest("他说的确实在理")
        cuttest("长春市长春节讲话")
        cuttest("结婚的和尚未结婚的")
        cuttest("结合成分子时")
        cuttest("旅游和服务是最好的")
        cuttest("这件事情的确是我的错")
        cuttest("供大家参考指正")
        cuttest("哈尔滨政府公布塌桥原因")
        cuttest("我在机场入口处")
        cuttest("邢永臣摄影报道")
        cuttest("BP神经网络如何训练才能在分类时增加区分度?")
        cuttest("南京市长江大桥")
        cuttest("应一些使用者的建议,也为了便于利用NiuTrans用于SMT研究")
        cuttest('长春市长春药店')
        cuttest('邓颖超生前最喜欢的衣服')
        cuttest('胡锦涛是热爱世界和平的政治局常委')
        cuttest('程序员祝海林和朱会震是在孙健的左面和右面, 范凯在最右面.再往左是李松洪')
        cuttest('一次性交多少钱')
        cuttest('两块五一套,三块八一斤,四块七一本,五块六一条')
        cuttest('小和尚留了一个像大和尚一样的和尚头')
        cuttest('我是中华人民共和国公民;我爸爸是共和党党员; 地铁和平门站')
        cuttest('张晓梅去人民医院做了个B超然后去买了件T恤')
        cuttest('AT&T是一件不错的公司,给你发offer了吗?')
        cuttest('C++和c#是什么关系?11+122=133,是吗?PI=3.14159')
        cuttest('你认识那个和主席握手的的哥吗?他开一辆黑色的士。')
        cuttest('枪杆子中出政权')
        cuttest('张三风同学走上了不归路')
        cuttest('阿Q腰间挂着BB机手里拿着大哥大,说:我一般吃饭不AA制的。')
        cuttest('在1号店能买到小S和大S八卦的书。')
| mit |
pmoravec/sos | sos/report/reporting.py | 2 | 7251 | # Copyright (C) 2014 Red Hat, Inc.,
# Bryn M. Reeves <bmr@redhat.com>
#
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
""" This provides a restricted tag language to define the sosreport
index/report
"""
try:
import json
except ImportError:
import simplejson as json
class Node(object):
    """Base element of the report tree.

    Every node carries a ``data`` payload that serializes to JSON; by
    default a node accepts no children.
    """

    def __str__(self):
        """Render this node's payload as a JSON string."""
        return json.dumps(self.data)

    def can_add(self, node):
        """Base nodes accept no children; subclasses override this."""
        return False
class Leaf(Node):
    """Marker base class: only Leaf subclasses may be added to a Section."""
    pass
class Report(Node):
    """The root element of a report; a container holding only Sections."""

    def __init__(self):
        self.data = {}

    def can_add(self, node):
        """A report accepts Section children exclusively."""
        return isinstance(node, Section)

    def add(self, *nodes):
        """Attach every addable node, keyed by its section name."""
        for candidate in nodes:
            if self.can_add(candidate):
                self.data[candidate.name] = candidate.data
def _decode(s):
"""returns a string text for a given unicode/str input"""
return (s if isinstance(s, str) else s.decode('utf8', 'ignore'))
class Section(Node):
    """A named container for Leaf elements.

    Sections may be nested inside Report objects only.
    """

    def __init__(self, name):
        self.name = _decode(name)
        self.data = {}

    def can_add(self, node):
        """Sections accept Leaf elements only."""
        return isinstance(node, Leaf)

    def add(self, *nodes):
        """Group each addable leaf's payload under its ADDS_TO bucket."""
        for leaf in nodes:
            if self.can_add(leaf):
                self.data.setdefault(leaf.ADDS_TO, []).append(leaf.data)
class Command(Leaf):
    """Leaf describing an executed command and a link to its output."""

    ADDS_TO = "commands"

    def __init__(self, name, return_code, href):
        self.data = {
            "name": _decode(name),
            "return_code": return_code,
            "href": _decode(href),
        }
class CopiedFile(Leaf):
    """Leaf describing a file copied into the report archive."""

    ADDS_TO = "copied_files"

    def __init__(self, name, href):
        self.data = {
            "name": _decode(name),
            "href": _decode(href),
        }
class CreatedFile(Leaf):
    """Leaf describing a file created during report generation."""

    ADDS_TO = "created_files"

    def __init__(self, name, href):
        self.data = {
            "name": _decode(name),
            "href": _decode(href),
        }
class Alert(Leaf):
    """Leaf carrying a free-form alert message for the report."""

    ADDS_TO = "alerts"

    def __init__(self, content):
        self.data = _decode(content)
class Note(Leaf):
    """Leaf carrying a free-form informational note for the report."""

    ADDS_TO = "notes"

    def __init__(self, content):
        self.data = _decode(content)
def ends_bs(string):
    """Return True if *string* ends with a backslash, False otherwise.

    Kept as a named function (rather than a lambda bound to a name)
    because pep8 forbids the latter:
    'E731 do not assign a lambda expression, use a def'
    """
    return string[-1:] == '\\'
class PlainTextReport(object):
    """Will generate a plain text report from a top_level Report object"""
    # Formatting templates; subclasses (HTMLReport) override these to change
    # the output markup without touching the rendering logic below.
    HEADER = ""
    FOOTER = ""
    LEAF = " * %(name)s"
    ALERT = " ! %s"
    NOTE = " * %s"
    PLUGLISTHEADER = "Loaded Plugins:"
    PLUGLISTITEM = " {name}"
    PLUGLISTSEP = "\n"
    PLUGLISTMAXITEMS = 5
    PLUGLISTFOOTER = ""
    PLUGINFORMAT = "{name}"
    PLUGDIVIDER = "=" * 72
    # (leaf class, line template, subsection header, subsection footer)
    # for every kind of leaf rendered beneath each plugin section.
    subsections = (
        (Command, LEAF, "- commands executed:", ""),
        (CopiedFile, LEAF, "- files copied:", ""),
        (CreatedFile, LEAF, "- files created:", ""),
        (Alert, ALERT, "- alerts:", ""),
        (Note, NOTE, "- notes:", ""),
    )
    # NOTE(review): class-level mutable attribute; unicode() rebinds it to a
    # fresh list per call, so instances do not actually share state.
    line_buf = []
    def __init__(self, report_node):
        # Sort sections by name so output order is deterministic.
        self.report_data = sorted(dict.items(report_node.data))
    def unicode(self):
        """Render the whole report and return it as one text string."""
        self.line_buf = line_buf = []
        if (len(self.HEADER) > 0):
            line_buf.append(self.HEADER)
        # generate section/plugin list, split long list to multiple lines
        line_buf.append(self.PLUGLISTHEADER)
        line = ""
        i = 0
        plugcount = len(self.report_data)
        for section_name, _ in self.report_data:
            line += self.PLUGLISTITEM.format(name=section_name)
            i += 1
            # break the line after every PLUGLISTMAXITEMS entries, but not
            # after the final plugin
            if (i % self.PLUGLISTMAXITEMS == 0) and (i < plugcount):
                line += self.PLUGLISTSEP
        line += self.PLUGLISTFOOTER
        line_buf.append(line)
        # one divider-separated block per plugin section
        for section_name, section_contents in self.report_data:
            line_buf.append(self.PLUGDIVIDER)
            line_buf.append(self.PLUGINFORMAT.format(name=section_name))
            for type_, format_, header, footer in self.subsections:
                self.process_subsection(section_contents, type_.ADDS_TO,
                                        header, format_, footer)
        if (len(self.FOOTER) > 0):
            line_buf.append(self.FOOTER)
        # decode any stray byte strings so the join cannot fail on mixed types
        output = u'\n'.join(map(lambda i: (i if isinstance(i, str)
                                           else i.decode('utf8', 'ignore')),
                                line_buf))
        return output
    def process_subsection(self, section, key, header, format_, footer):
        """Append one formatted subsection (commands, files, ...) if present."""
        if key in section:
            self.line_buf.append(header)
            # sort dict items by their "name" key; non-dict items sort first
            for item in sorted(
                    section.get(key),
                    key=lambda x: x["name"] if isinstance(x, dict) else ''
            ):
                self.line_buf.append(format_ % item)
            if (len(footer) > 0):
                self.line_buf.append(footer)
class HTMLReport(PlainTextReport):
    """Will generate a HTML report from a top_level Report object"""
    # XHTML page skeleton emitted before and after the report body.
    HEADER = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
    <head>
        <meta http-equiv="Content-Type" content="text/html;
            charset=utf-8" />
        <title>Sos System Report</title>
        <style type="text/css">
            td {
                padding: 0 5px;
            }
        </style>
    </head>
    <body>\n"""
    FOOTER = "</body></html>"
    # Markup overrides for the templates defined on PlainTextReport.
    LEAF = '<li><a href="%(href)s">%(name)s</a></li>'
    ALERT = "<li>%s</li>"
    NOTE = "<li>%s</li>"
    PLUGLISTHEADER = "<h3>Loaded Plugins:</h3><table><tr>"
    PLUGLISTITEM = '<td><a href="#{name}">{name}</a></td>\n'
    PLUGLISTSEP = "</tr>\n<tr>"
    PLUGLISTMAXITEMS = 5
    PLUGLISTFOOTER = "</tr></table>"
    PLUGINFORMAT = '<h2 id="{name}">Plugin <em>{name}</em></h2>'
    PLUGDIVIDER = "<hr/>\n"
    # Same (leaf class, template, header, footer) tuples as the parent,
    # but with HTML list markup.
    subsections = (
        (Command, LEAF, "<p>Commands executed:</p><ul>", "</ul>"),
        (CopiedFile, LEAF, "<p>Files copied:</p><ul>", "</ul>"),
        (CreatedFile, LEAF, "<p>Files created:</p><ul>", "</ul>"),
        (Alert, ALERT, "<p>Alerts:</p><ul>", "</ul>"),
        (Note, NOTE, "<p>Notes:</p><ul>", "</ul>"),
    )
class JSONReport(PlainTextReport):
    """Render the top-level Report data as pretty-printed JSON."""

    def unicode(self):
        """Serialize the sorted (name, contents) pairs to a JSON string."""
        return json.dumps(self.report_data, indent=4, ensure_ascii=False)
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
waprin/gcloud-python | gcloud/bigtable/_generated/bigtable_table_service_pb2.py | 6 | 15984 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2
from gcloud.bigtable._generated import bigtable_table_service_messages_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_service.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\n;google/bigtable/admin/table/v1/bigtable_table_service.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\x1a\x44google/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\x89\x0b\n\x14\x42igtableTableService\x12\xa4\x01\n\x0b\x43reateTable\x12\x32.google.bigtable.admin.table.v1.CreateTableRequest\x1a%.google.bigtable.admin.table.v1.Table\":\x82\xd3\xe4\x93\x02\x34\"//v1/{name=projects/*/zones/*/clusters/*}/tables:\x01*\x12\xac\x01\n\nListTables\x12\x31.google.bigtable.admin.table.v1.ListTablesRequest\x1a\x32.google.bigtable.admin.table.v1.ListTablesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{name=projects/*/zones/*/clusters/*}/tables\x12\x9d\x01\n\x08GetTable\x12/.google.bigtable.admin.table.v1.GetTableRequest\x1a%.google.bigtable.admin.table.v1.Table\"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x94\x01\n\x0b\x44\x65leteTable\x12\x32.google.bigtable.admin.table.v1.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"9\x82\xd3\xe4\x93\x02\x33*1/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x9e\x01\n\x0bRenameTable\x12\x32.google.bigtable.admin.table.v1.RenameTableRequest\x1a\x16.google.protobuf.Empty\"C\x82\xd3\xe4\x93\x02=\"8/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename:\x01*\x12\xca\x01\n\x12\x43reateColumnFamily\x12\x39.google.bigtable.admin.table.v1.CreateColumnFamilyRequest\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"K\x82\xd3\xe4\x93\x02\x45\"@/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies:\x01*\x12\xbf\x01\n\x12UpdateColumnFamily\x12,.google.bigtable.admin.table.v1.ColumnFamily\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"M\x82\xd3\xe4\x93\x02G\x1a\x42/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}:\x01*\x12\xb3\x01\n\x12\x44\x65leteColumnFamily\x12\x39.google.bigtable.admin.table.v1.DeleteColumnFami
lyRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44*B/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}BB\n\"com.google.bigtable.admin.table.v1B\x1a\x42igtableTableServicesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\032BigtableTableServicesProtoP\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# NOTE(review): generated by the protocol buffer compiler -- do not edit the
# code by hand; regenerate from the .proto instead.
class BetaBigtableTableServiceServicer(object):
  """Abstract server-side interface for BigtableTableService (grpc beta).

  Subclasses must implement every RPC method below; each receives the
  request message and a grpc context.
  """
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def CreateTable(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def ListTables(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def GetTable(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def DeleteTable(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def RenameTable(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def CreateColumnFamily(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def UpdateColumnFamily(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def DeleteColumnFamily(self, request, context):
    raise NotImplementedError()
# NOTE(review): generated by the protocol buffer compiler -- do not edit the
# code by hand; regenerate from the .proto instead.
class BetaBigtableTableServiceStub(object):
  """The interface to which stubs will conform."""
  __metaclass__ = abc.ABCMeta
  # Each method takes (request, timeout); the `.future` attribute is filled
  # in by the dynamic stub with the asynchronous variant of the call.
  @abc.abstractmethod
  def CreateTable(self, request, timeout):
    raise NotImplementedError()
  CreateTable.future = None
  @abc.abstractmethod
  def ListTables(self, request, timeout):
    raise NotImplementedError()
  ListTables.future = None
  @abc.abstractmethod
  def GetTable(self, request, timeout):
    raise NotImplementedError()
  GetTable.future = None
  @abc.abstractmethod
  def DeleteTable(self, request, timeout):
    raise NotImplementedError()
  DeleteTable.future = None
  @abc.abstractmethod
  def RenameTable(self, request, timeout):
    raise NotImplementedError()
  RenameTable.future = None
  @abc.abstractmethod
  def CreateColumnFamily(self, request, timeout):
    raise NotImplementedError()
  CreateColumnFamily.future = None
  @abc.abstractmethod
  def UpdateColumnFamily(self, request, timeout):
    raise NotImplementedError()
  UpdateColumnFamily.future = None
  @abc.abstractmethod
  def DeleteColumnFamily(self, request, timeout):
    raise NotImplementedError()
  DeleteColumnFamily.future = None
def beta_create_BigtableTableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Build a grpc beta server wrapping *servicer* for BigtableTableService.

  NOTE(review): generated code (protoc grpc beta plugin) -- do not edit by
  hand. The redundant repeated imports below are an artifact of the
  generator emitting one import per method parameter/return type.
  """
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import google.protobuf.empty_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import google.protobuf.empty_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import google.protobuf.empty_pb2
  # Maps (service, method) -> deserializer for the incoming request bytes.
  request_deserializers = {
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
  }
  # Maps (service, method) -> serializer for the outgoing response message.
  response_serializers = {
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
  }
  # Maps (service, method) -> the servicer method, all unary-unary here.
  method_implementations = {
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): face_utilities.unary_unary_inline(servicer.CreateColumnFamily),
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable),
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): face_utilities.unary_unary_inline(servicer.DeleteColumnFamily),
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable),
    ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable),
    ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables),
    ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): face_utilities.unary_unary_inline(servicer.RenameTable),
    ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): face_utilities.unary_unary_inline(servicer.UpdateColumnFamily),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
def beta_create_BigtableTableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Build a grpc beta dynamic client stub for BigtableTableService.

  NOTE(review): generated code (protoc grpc beta plugin) -- do not edit by
  hand. The redundant repeated imports below are an artifact of the
  generator emitting one import per method parameter/return type.
  """
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import google.protobuf.empty_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import google.protobuf.empty_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import google.protobuf.empty_pb2
  # Maps (service, method) -> serializer for the outgoing request message.
  request_serializers = {
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.SerializeToString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
  }
  # Maps (service, method) -> deserializer for the incoming response bytes.
  response_deserializers = {
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.FromString,
    ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
  }
  # Every RPC in this service is a simple request/response pair.
  cardinalities = {
    'CreateColumnFamily': cardinality.Cardinality.UNARY_UNARY,
    'CreateTable': cardinality.Cardinality.UNARY_UNARY,
    'DeleteColumnFamily': cardinality.Cardinality.UNARY_UNARY,
    'DeleteTable': cardinality.Cardinality.UNARY_UNARY,
    'GetTable': cardinality.Cardinality.UNARY_UNARY,
    'ListTables': cardinality.Cardinality.UNARY_UNARY,
    'RenameTable': cardinality.Cardinality.UNARY_UNARY,
    'UpdateColumnFamily': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.table.v1.BigtableTableService', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_5_0/spm_history_end_host_history_grid_broker.py | 18 | 6716 | from ..broker import Broker
class SpmHistoryEndHostHistoryGridBroker(Broker):
    """API broker for the NetMRI ``spm_history_end_host_history_grids`` controller."""

    controller = "spm_history_end_host_history_grids"

    def index(self, **kwargs):
        """List the available SPM history end-host history grids.

        Any of the recognized inputs may be used to narrow the list;
        unrecognized inputs are ignored. Of the various ways to query
        lists, this method is the most efficient.

        Recognized keyword inputs:
            HostDeviceID (Integer): internal NetMRI identifier for the host.
            HostDeviceMAC (String): MAC address of the host.
            HostDeviceIPNumeric (Integer): numeric IP address of the host.
            HostDeviceIPDotted (String): dotted IP address of the host.
            starttime (DateTime, default today): lower time boundary.
            endtime (DateTime, default tomorrow): upper time boundary.
            start (Integer, default 0): first record of the returned page.
            limit (Integer, default 1000, max 10000): page size.
            sort (Array of String, default id): field(s) to sort by; valid
                values include id, FirstSeen, LastSeen, HostIPNumeric,
                HostIPAddress, HostMAC, HostName, DeviceID, DeviceType,
                DeviceName, InterfaceID, ifIndex, Interface, ifMAC,
                ifOperStatus, VlanIndex, VlanName, VlanID, VTPDomain,
                VirtualNetworkID, Network.
            dir (Array of String, default asc): sort direction(s),
                'asc' or 'desc'.
            select (Array): attributes to return per grid row; all when
                empty or omitted.
            goto_field (String, api >= 2.8): field name for NIOS GOTO row
                positioning.
            goto_value (String, api >= 2.8): value of goto_field for NIOS
                GOTO row positioning.
            refresh_ind (Boolean, default False): regenerate the grid
                rather than using cached data.
            async_ind (Boolean, default False): if grid data is not yet
                available, return immediately with HTTP 202; retry later.

        Returns:
            dict with ``spm_history_end_host_history_grids`` (Array of
            SpmHistoryEndHostHistoryGrid matching the criteria) and
            ``summary`` (Hash of column calculations, when applicable).
        """
        method_name = self._get_method_fullname("index")
        return self.api_list_request(method_name, kwargs)
| apache-2.0 |
rdamas/e2openplugin-OpenWebif | plugin/controllers/models/control.py | 3 | 7247 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##########################################################################
# OpenWebif: control
##########################################################################
# Copyright (C) 2011 - 2020 E2OpenPlugins
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
##########################################################################
from __future__ import print_function
from Components.config import config
from enigma import eServiceReference, eActionMap, eServiceCenter
from Plugins.Extensions.OpenWebif.controllers.models.services import getProtection
from Screens.InfoBar import InfoBar, MoviePlayer
import NavigationInstance
import os
# proc entry exposing the current video decoder mode on supported receivers
ENABLE_QPIP_PROCPATH = "/proc/stb/video/decodermode"


def checkIsQPiP():
    """Return True when the video decoder is in quad-PiP ("mosaic") mode.

    Boxes without the proc entry do not support the feature, so the
    absence of the file simply yields False.
    """
    if os.access(ENABLE_QPIP_PROCPATH, os.F_OK):
        # Context manager guarantees the proc handle is closed even if the
        # read raises (the previous open/read/close leaked on failure).
        with open(ENABLE_QPIP_PROCPATH, "r") as fd:
            return fd.read().strip() == "mosaic"
    return False
def getPlayingref(ref):
    """Return the currently playing service reference.

    Falls back to an empty eServiceReference when nothing is playing or
    no navigation instance exists yet.
    """
    playing = None
    if NavigationInstance.instance:
        playing = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
    return playing or eServiceReference()
def isPlayableForCur(ref):
    """Return whether *ref* is playable given the currently playing service."""
    service_info = eServiceCenter.getInstance().info(ref)
    if not service_info:
        return service_info
    return service_info.isPlayable(ref, getPlayingref(ref))
def zapInServiceList(service):
    """Locate *service* in the bouquet tree, select it in the on-screen
    service list and zap to it.

    Searches TV then radio bouquet roots; if the service is not found in
    any bouquet, it is entered directly as a path instead.
    """
    InfoBar_Instance = InfoBar.instance
    servicelist = InfoBar_Instance.servicelist
    # Choose the bouquet roots matching the user's bouquet configuration.
    if config.usage.multibouquet.value:
        rootstrings = ('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet', '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet')
    else:
        rootstrings = ('1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 22) || (type == 25) || (type == 134) || (type == 195) FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet', '1:7:2:0:0:0:0:0:0:0:(type == 2) || (type == 10) FROM BOUQUET "userbouquet.favourites.radio" ORDER BY bouquet')
    bouquet_found = False
    for bouquet_rootstr in rootstrings:
        servicelist.bouquet_root = eServiceReference(bouquet_rootstr)
        # Switch the service list mode to match the root being searched.
        if bouquet_rootstr.find('radio') != -1:
            servicelist.setModeRadio()
        else:
            servicelist.setModeTv()
        bouquets = servicelist.getBouquetList()
        for bouquet in bouquets:
            reflist = []
            reflist = eServiceCenter.getInstance().list(bouquet[1])
            if reflist:
                # Walk every entry of this bouquet looking for the service.
                while True:
                    new_service = reflist.getNext()
                    if not new_service.valid(): # check if end of list
                        break
                    # Skip folders and markers; only real services count.
                    if new_service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker):
                        continue
                    if new_service == service:
                        bouquet_found = True
                        break
            if bouquet_found:
                break
        if bouquet_found:
            break
    if bouquet_found:
        # NOTE(review): relies on the for-loop variable `bouquet` leaking
        # out of the loop above; it still holds the matching bouquet here.
        bouquet = bouquet[1]
        # Rebuild the navigation path so the list shows the found bouquet.
        if servicelist.getRoot() != bouquet:
            servicelist.clearPath()
            if servicelist.bouquet_root != bouquet:
                servicelist.enterPath(servicelist.bouquet_root)
            servicelist.enterPath(bouquet)
    else:
        # Not in any bouquet: enter the service reference directly.
        servicelist.clearPath()
        servicelist.enterPath(service)
    servicelist.setCurrentSelection(service) # select the service in servicelist
    servicelist.zap()
def zapService(session, id, title="", stream=False):
    """Zap to the service referenced by the string *id*.

    Handles quad-PiP refusal, parental control checks, playback of local
    recordings via the movie player, and standby-aware behaviour for
    streaming clients. Returns a {"result": bool, "message": str} dict.
    """
    if checkIsQPiP():
        # Zapping would disturb the four decoder windows; refuse outright.
        return {
            "result": False,
            "message": "Can not zap service in quad PiP mode."
        }
    # Must NOT unquote id here, breaks zap to streams
    service = eServiceReference(id)
    if len(title) > 0:
        service.setName(title)
    else:
        title = id
    # A service with an absolute filesystem path is a local recording.
    isRecording = service.getPath()
    isRecording = isRecording and isRecording.startswith("/")
    if not isRecording:
        # Live services go through the parental control filter first.
        if config.ParentalControl.servicepinactive.value and config.OpenWebif.parentalenabled.value:
            if getProtection(service.toString()) != "0":
                return {
                    "result": False,
                    "message": "Service '%s' is blocked by parental Control" % title
                }
    # use mediaplayer for recording
    if isRecording:
        if isinstance(session.current_dialog, InfoBar):
            session.open(MoviePlayer, service)
        else:
            session.nav.playService(service)
    else:
        # Leaving movie playback: remember the new service and close the player.
        if isinstance(session.current_dialog, MoviePlayer):
            session.current_dialog.lastservice = service
            session.current_dialog.close()
        from Screens.Standby import inStandby
        if inStandby is None:
            zapInServiceList(service)
        else:
            if stream:
                # In standby a stream client only needs the current service
                # stopped (frees the tuner); no on-screen zap required.
                stop_text = ""
                if session.nav.getCurrentlyPlayingServiceReference() and isPlayableForCur(service):
                    session.nav.stopService()
                    stop_text = ": simple stop current service"
                return {
                    "result": True,
                    "message": "For stream don't need zap in standby %s" % stop_text
                }
            else:
                session.nav.playService(service)
    return {
        "result": True,
        "message": "Active service is now '%s'" % title
    }
def remoteControl(key, type="", rcu=""):
    """Inject a remote-control key press via enigma's action map.

    Args:
        key: numeric RC key code to send.
        type: "" for a normal press, "long" for a long press, or
            "ascii" for an ASCII keyboard code.
        rcu: remote model override ("standard", "advanced", "keyboard");
            any other value falls back to the configured RC type.

    Returns:
        dict with ``result`` (always True) and a human readable ``message``.
    """
    # TODO: do something better here
    rcu_map = {
        "standard": "dreambox remote control (native)",
        "advanced": "dreambox advanced remote control (native)",
        "keyboard": "dreambox ir keyboard",
    }
    if rcu in rcu_map:
        remotetype = rcu_map[rcu]
    elif config.misc.rcused.value == 0:
        remotetype = "dreambox advanced remote control (native)"
    else:
        remotetype = "dreambox remote control (native)"
    try:
        # Some models always ship the advanced remote regardless of config.
        from Tools.HardwareInfo import HardwareInfo
        if HardwareInfo().get_device_model() in ("xp1000", "formuler1", "formuler3", "et9000", "et9200", "hd1100", "hd1200"):
            remotetype = "dreambox advanced remote control (native)"
    except Exception:
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        print("[OpenWebIf] wrong hw detection")
    amap = eActionMap.getInstance()
    if type == "long":
        # flag 0 = key make (press), 3 = long-press event
        amap.keyPressed(remotetype, key, 0)
        amap.keyPressed(remotetype, key, 3)
    elif type == "ascii":
        # flag 4 = ASCII code injection
        amap.keyPressed(remotetype, key, 4)
    else:
        # normal press: make (0) followed by break (1)
        amap.keyPressed(remotetype, key, 0)
        amap.keyPressed(remotetype, key, 1)
    return {
        "result": True,
        "message": "RC command '%s' has been issued" % str(key)
    }
def setPowerState(session, state):
	"""Change the receiver's power state.

	State codes: 0 toggle standby, 1 deep standby, 2 reboot,
	3 restart enigma, 4 wakeup, 5 enter standby.  Unknown codes are
	ignored (a leftover `state == 6` debug print was removed).

	Returns a dict with "result" and the current standby flag.
	"""
	from Screens.Standby import Standby, TryQuitMainloop, inStandby
	state = int(state)
	if state == 0:  # Toggle StandBy
		if inStandby is None:
			session.open(Standby)
		else:
			inStandby.Power()
	elif state in (1, 2, 3):  # DeepStandBy / Reboot / Restart Enigma
		# TryQuitMainloop interprets the state code itself.
		session.open(TryQuitMainloop, state)
	elif state == 4:  # Wakeup
		if inStandby is not None:
			inStandby.Power()
	elif state == 5:  # Standby
		if inStandby is None:
			session.open(Standby)
	# NOTE(review): 'instandby' reflects the value captured at function
	# entry; it may not yet reflect a transition triggered above — confirm
	# whether callers rely on the pre- or post-transition state.
	return {
		"result": True,
		"instandby": inStandby is not None
	}
def getStandbyState(session):
	"""Report whether the receiver is currently in standby mode."""
	from Screens.Standby import inStandby
	standby_active = inStandby is not None
	return {
		"result": True,
		"instandby": standby_active
	}
| gpl-3.0 |
exelearning/iteexe | twisted/python/urlpath.py | 81 | 3431 | # -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
import urlparse
import urllib
class URLPath:
    """A URL split into (scheme, netloc, path, query, fragment) components.

    A missing scheme defaults to 'http' and a missing path to '/'.  The
    quoted and unquoted path-segment lists are cached lazily on the
    instance by pathList().
    """
    def __init__(self, scheme='', netloc='localhost', path='',
                 query='', fragment=''):
        self.scheme = scheme or 'http'
        self.netloc = netloc
        self.path = path or '/'
        self.query = query
        self.fragment = fragment
    # Class-level cache slots; shadowed by instance attributes on first
    # call to pathList().
    _qpathlist = None
    _uqpathlist = None
    def pathList(self, unquote=0, copy=1):
        """Return the path split on '/'; URL-unquoted if `unquote` is true.

        With `copy` true (the default) a fresh list is returned, so the
        caller may mutate it without corrupting the cached one.
        """
        if self._qpathlist is None:
            self._qpathlist = self.path.split('/')
            # NOTE(review): Python 2 map() returns a list here; a Python 3
            # port would need list(map(...)) for the [:] copy below to work.
            self._uqpathlist = map(urllib.unquote, self._qpathlist)
        if unquote:
            result = self._uqpathlist
        else:
            result = self._qpathlist
        if copy:
            return result[:]
        else:
            return result
    def fromString(klass, st):
        """Alternate constructor: build a URLPath from URL string `st`."""
        t = urlparse.urlsplit(st)
        u = klass(*t)
        return u
    fromString = classmethod(fromString)
    def fromRequest(klass, request):
        """Alternate constructor: build a URLPath from a twisted.web
        request's pre-path URL."""
        return klass.fromString(request.prePathURL())
    fromRequest = classmethod(fromRequest)
    def _pathMod(self, newpathsegs, keepQuery):
        # Helper: rebuild this URL with new path segments, optionally
        # keeping the query string.  The fragment is always dropped.
        if keepQuery:
            query = self.query
        else:
            query = ''
        return URLPath(self.scheme,
                       self.netloc,
                       '/'.join(newpathsegs),
                       query)
    def sibling(self, path, keepQuery=0):
        """Return the URL for `path` in the same directory as this one."""
        l = self.pathList()
        l[-1] = path
        return self._pathMod(l, keepQuery)
    def child(self, path, keepQuery=0):
        """Return the URL for `path` one level below this one."""
        l = self.pathList()
        if l[-1] == '':
            l[-1] = path
        else:
            l.append(path)
        return self._pathMod(l, keepQuery)
    def parent(self, keepQuery=0):
        """Return the URL of this URL's parent directory."""
        l = self.pathList()
        if l[-1] == '':
            # Directory URL ('.../foo/'): drop the last real segment.
            del l[-2]
        else:
            # We are a file, such as http://example.com/foo/bar
            # our parent directory is http://example.com/
            l.pop()
            l[-1] = ''
        return self._pathMod(l, keepQuery)
    def here(self, keepQuery=0):
        """Return the URL of the directory containing this resource."""
        l = self.pathList()
        if l[-1] != '':
            l[-1] = ''
        return self._pathMod(l, keepQuery)
    def click(self, st):
        """Return a path which is the URL where a browser would presumably take
        you if you clicked on a link with an HREF as given.
        """
        scheme, netloc, path, query, fragment = urlparse.urlsplit(st)
        if not scheme:
            scheme = self.scheme
        if not netloc:
            netloc = self.netloc
            if not path:
                # Same-document link: keep path, and keep the query too
                # unless the link supplies one.
                path = self.path
                if not query:
                    query = self.query
            elif path[0] != '/':
                # Relative path: resolve against this URL's directory.
                l = self.pathList()
                l[-1] = path
                path = '/'.join(l)
        return URLPath(scheme,
                       netloc,
                       path,
                       query,
                       fragment)
    def __str__(self):
        """Reassemble the five components into a URL string."""
        x = urlparse.urlunsplit((
            self.scheme, self.netloc, self.path,
            self.query, self.fragment))
        return x
    def __repr__(self):
        return ('URLPath(scheme=%r, netloc=%r, path=%r, query=%r, fragment=%r)'
                % (self.scheme, self.netloc, self.path, self.query, self.fragment))
| gpl-2.0 |
eunchong/build | scripts/master/perf_count_notifier.py | 3 | 19351 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import time
from urllib import urlencode
from urlparse import parse_qs, urlsplit, urlunsplit
from buildbot.status.builder import FAILURE, SUCCESS
from master import build_utils
from master.chromium_notifier import ChromiumNotifier
from master.failures_history import FailuresHistory
from twisted.internet import defer
from twisted.python import log
try:
# Create a block to work around evil sys.modules manipulation in
# email/__init__.py that triggers pylint false positives.
# pylint has issues importing it.
# pylint: disable=E0611,F0401
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
except ImportError:
raise
# The history of results expire every day.
_EXPIRATION_TIME = 24 * 3600
# Perf results key words used in test result step.
PERF_REGRESS = 'PERF_REGRESS'
PERF_IMPROVE = 'PERF_IMPROVE'
# Short forms used as keys in the per-builder email results dicts.
REGRESS = 'REGRESS'
IMPROVE = 'IMPROVE'
# Key to last time an email is sent per builder
EMAIL_TIME = 'EMAIL_TIME'
# Key to the results graph URL stored per builder.
GRAPH_URL = 'GRAPH_URL'
def PerfLog(msg):
  """Write msg to the twisted log, tagged with the notifier's name."""
  tagged = '[PerfCountNotifier] %s' % msg
  log.msg(tagged)
class PerfCountNotifier(ChromiumNotifier):
  """This is a status notifier that only alerts on consecutive perf changes.
  The notifier only notifies when a number of consecutive REGRESS or IMPROVE
  perf results are recorded.
  See builder.interfaces.IStatusReceiver for more information about
  parameters type.
  """
  def __init__(self, step_names, minimum_count=5, combine_results=True,
               **kwargs):
    """Initializes the PerfCountNotifier on tests starting with test_name.
    Args:
      step_names: List of perf steps names. This is needed to know perf steps
          from other steps especially when the step is successful.
      minimum_count: The number of minimum consecutive (REGRESS|IMPROVE) needed
          to notify.
      combine_results: Combine summary results email for all builders in one.
    """
    # Set defaults.
    ChromiumNotifier.__init__(self, **kwargs)
    self.minimum_count = minimum_count
    self.combine_results = combine_results
    self.step_names = step_names
    # Placeholder; replaced with a FailuresHistory a few lines below.
    self.recent_results = None
    self.error_email = False
    # Maps builder name -> dict created by GetNewBuilderResult().
    self.new_email_results = {}
    self.recent_results = FailuresHistory(expiration_time=_EXPIRATION_TIME,
                                          size_limit=1000)
  def AddNewEmailResult(self, result):
    """Stores an email result for a builder.
    Args:
      result: A tuple of the form ('REGRESS|IMPROVE', 'value_name', 'builder').
    """
    builder_name = result[2]
    build_results = self.GetEmailResults(builder_name)
    if not result[1] in build_results[result[0]]:
      build_results[result[0]].append(result[1])
    else:
      PerfLog('(%s) email result has already been stored.' % ', '.join(result))
  def GetEmailResults(self, builder_name):
    """Returns the email results for a builder."""
    # Lazily creates the per-builder record on first access.
    if not builder_name in self.new_email_results:
      self.new_email_results[builder_name] = GetNewBuilderResult()
    return self.new_email_results[builder_name]
  def _UpdateResults(self, builder_name, results):
    """Updates the results by adding/removing from the history.
    Args:
      builder_name: Builder name the results belong to.
      results: List of result tuples, each tuple is of the form
          ('REGRESS|IMPROVE', 'value_name', 'builder').
    """
    new_results_ids = [' '.join(result) for result in results]
    # Delete the old results if the new results do not have them.
    to_delete = [old_id for old_id in self.recent_results.failures
                 if (old_id not in new_results_ids and
                     old_id.endswith(builder_name))]
    for old_id in to_delete:
      self._DeleteResult(old_id)
    # Update the new results history
    for new_id in results:
      self._StoreResult(new_id)
  def _StoreResult(self, result):
    """Stores the result value and removes counter results.
    Example: if this is a REGRESS result then it is stored and its counter
    IMPROVE result, if any, is reset.
    Args:
      result: A tuple of the form ('REGRESS|IMPROVE', 'value_name', 'builder').
    """
    self.recent_results.Put(' '.join(result))
    # NOTE(review): stored ids are ' '.join(result) ('REGRESS time/t bot'),
    # but the counter id below concatenates without a separator
    # ('IMPROVEtime/t bot'), so it may never match a stored id — confirm
    # whether the counter reset actually fires.
    if result[0] == REGRESS:
      counter_id = IMPROVE + ' '.join(result[1:])
    else:
      counter_id = REGRESS + ' '.join(result[1:])
    # Reset counter_id count since this breaks the consecutive count of it.
    self._DeleteResult(counter_id)
  def _DeleteResult(self, result_id):
    """Removes the history of results identified by result_id.
    Args:
      result_id: The id of the history entry (see _StoreResult() for details).
    """
    num_results = self.recent_results.GetCount(result_id)
    if num_results > 0:
      # This is a hack into FailuresHistory since it does not allow to delete
      # entries in its history unless they are expired.
      # FailuresHistory.failures_count is the total number of entries in the
      # history limitted by FailuresHistory.size_limit.
      del self.recent_results.failures[result_id]
      self.recent_results.failures_count -= num_results
  def _DeleteAllForBuild(self, builder_name):
    """Deletes all test results related to a builder."""
    to_delete = [result for result in self.recent_results.failures
                 if result.endswith(builder_name)]
    for result in to_delete:
      self._DeleteResult(result)
  def _ResetResults(self, builder_name):
    """Reset pending email results for builder."""
    builders = [builder_name]
    if self.combine_results:
      builders = self.new_email_results.keys()
    for builder_name in builders:
      self._DeleteAllForBuild(builder_name)
      self.new_email_results[builder_name] = GetNewBuilderResult()
      # Record the send time so _ShouldSendEmail can rate-limit.
      self.new_email_results[builder_name][EMAIL_TIME] = time.time()
  def _IsPerfStep(self, step_status):
    """Checks if the step name is one of the defined perf tests names."""
    return self.getName(step_status) in self.step_names
  def isInterestingStep(self, build_status, step_status, results):
    """Ignore the step if it is not one of the perf results steps.
    Returns:
      True: - if a REGRESS|IMPROVE happens consecutive minimum number of times.
            - if it is not a SUCCESS step and neither REGRESS|IMPROVE.
      False: - if it is a SUCCESS step.
             - if it is a notification which has already been notified.
    """
    self.error_email = False
    step_text = ' '.join(step_status.getText())
    PerfLog('Analyzing failure text: %s.' % step_text)
    if (not self._IsPerfStep(step_status) or
        not self.isInterestingBuilder(build_status.getBuilder())):
      return False
    # In case of exceptions, sometimes results output is empty.
    if not results:
      results = [FAILURE]
    builder_name = build_status.getBuilder().getName()
    self.SetBuilderGraphURL(self.getName(step_status), build_status)
    # If it is a success step, i.e. not interesting, then reset counters.
    if results[0] == SUCCESS:
      self._DeleteAllForBuild(builder_name)
      return False
    # step_text is similar to:
    # media_tests_av_perf <div class="BuildResultInfo"> PERF_REGRESS:
    # time/t (89.07%) PERF_IMPROVE: fps/video (5.40%) </div>
    #
    # regex would return tuples of the form:
    # ('REGRESS', 'time/t', 'linux-rel')
    # ('IMPROVE', 'fps/video', 'win-debug')
    #
    # It is important to put the builder name as the last element in the tuple
    # since it is used to check tests that belong to same builder.
    # NOTE(review): step_text was already computed and logged at the top of
    # this method; the two lines below repeat that work verbatim.
    step_text = ' '.join(step_status.getText())
    PerfLog('Analyzing failure text: %s.' % step_text)
    perf_regress = perf_improve = ''
    perf_results = []
    if PERF_REGRESS in step_text:
      # Slice out the text between 'PERF_REGRESS:' and 'PERF_IMPROVE'.
      perf_regress = step_text[step_text.find(PERF_REGRESS) + len(PERF_REGRESS)
                               + 1: step_text.find(PERF_IMPROVE)]
      perf_results.extend([(REGRESS, test_name, builder_name) for test_name in
                           re.findall(r'(\S+) (?=\(.+\))', perf_regress)])
    if PERF_IMPROVE in step_text:
      # Based on log_parser/process_log.py PerformanceChangesAsText() function,
      # we assume that PERF_REGRESS (if any) appears before PERF_IMPROVE.
      perf_improve = step_text[step_text.find(PERF_IMPROVE) + len(PERF_IMPROVE)
                               + 1:]
      perf_results.extend([(IMPROVE, test_name, builder_name) for test_name in
                           re.findall(r'(\S+) (?=\(.+\))', perf_improve)])
    # If there is no regress or improve then this could be warning or exception.
    if not perf_results:
      if not self.recent_results.GetCount(step_text):
        PerfLog('Unrecognized step status. Reporting status as interesting.')
        # Force the build box to show in email
        self.error_email = True
        self.recent_results.Put(step_text)
        return True
      else:
        PerfLog('This problem has already been notified.')
        return False
    update_list = []
    for result in perf_results:
      if len(result) != 3:
        # We expect a tuple similar to ('REGRESS', 'time/t', 'linux-rel')
        continue
      result_id = ' '.join(result)
      update_list.append(result)
      PerfLog('Result: %s happened %d times in a row.' %
              (result_id, self.recent_results.GetCount(result_id) + 1))
      if self.recent_results.GetCount(result_id) >= self.minimum_count - 1:
        # This is an interesting result! We got the minimum consecutive count of
        # this result. Store it in email results.
        PerfLog('Result: %s happened enough consecutive times to be reported.'
                % result_id)
        self.AddNewEmailResult(result)
    self._UpdateResults(builder_name, update_list)
    # Final decision is made based on whether there are any notifications to
    # email based on this and older build results.
    return self.ShouldSendEmail(builder_name)
  def buildMessage(self, builder_name, build_status, results, step_name):
    """Send an email about this interesting step.
    Add the perf regressions/improvements that resulted in this email if any.
    """
    PerfLog('About to send an email.')
    email_subject = self.GetEmailSubject(builder_name, build_status, results,
                                         step_name)
    email_body = self.GetEmailBody(builder_name, build_status, results,
                                   step_name)
    html_content = (
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">'
        '<html xmlns="http://www.w3.org/1999/xhtml"><body>%s</body></html>' %
        email_body)
    defered_object = self.BuildEmailObject(email_subject, html_content,
                                           builder_name, build_status,
                                           step_name)
    # Clear pending results and stamp the send time.
    self._ResetResults(builder_name)
    return defered_object
  def ShouldSendEmail(self, builder_name):
    """Returns if we should send a summary email at this moment.
    Returns:
      True if it has been at least minimum_delay_between_alert since the
          last email sent. False otherwise.
    """
    builders = [builder_name]
    if self.combine_results:
      builders = self.new_email_results.keys()
    for builder_name in builders:
      if self._ShouldSendEmail(builder_name):
        return True
    return False
  def _ShouldSendEmail(self, builder_name):
    # Per-builder check: rate-limited and requires pending perf results.
    results = self.GetEmailResults(builder_name)
    last_time_mail_sent = results[EMAIL_TIME]
    if (last_time_mail_sent and last_time_mail_sent >
        time.time() - self.minimum_delay_between_alert):
      # Rate limit tree alerts.
      PerfLog('Time since last email is too short. Should not send email.')
      return False
    # Return True if there are any builder results to email about.
    return results and (results[REGRESS] or results[IMPROVE])
  def GetEmailSubject(self, builder_name, build_status, results, step_name):
    """Returns the subject of for an email based on perf results."""
    project_name = self.master_status.getTitle()
    latest_revision = build_utils.getLatestRevision(build_status)
    result = 'changes'
    builders = [builder_name]
    if self.combine_results:
      builders = self.new_email_results.keys()
    return ('%s %s on %s, revision %s' %
            (project_name, result, ', '.join(builders), str(latest_revision)))
  def GetEmailHeader(self, builder_name, build_status, results, step_name):
    """Returns a header message in an email.
    Used for backward compatibility with chromium_notifier. It allows the
    users to add text to every email from the master.cfg setup.
    """
    status_text = self.status_header % {
        'builder': builder_name,
        'steps': step_name,
        'results': results
    }
    return status_text
  def GetEmailBody(self, builder_name, build_status, results, step_name):
    """Returns the main email body content."""
    email_body = ''
    builders = [builder_name]
    if self.combine_results:
      builders = self.new_email_results.keys()
    for builder_name in builders:
      email_body += '%s%s\n' % (
          self.GetEmailHeader(builder_name, build_status, results, step_name),
          self.GetPerfEmailBody(builder_name)
      )
    # Latest build box is not relevant with multiple builder results combined.
    if not self.combine_results or self.error_email:
      email_body += ('\n\nLatest build results:%s' %
                     self.GenStepBox(builder_name, build_status, step_name))
    PerfLog('Perf email body: %s' % email_body)
    return email_body.replace('\n', '<br>')
  def GetPerfEmailBody(self, builder_name):
    """Returns the REGRESS/IMPROVE result links for one builder as HTML."""
    builder_results = self.GetEmailResults(builder_name)
    graph_url = builder_results[GRAPH_URL]
    msg = ''
    # Add regression HTML links.
    if builder_results[REGRESS]:
      test_urls = CreateHTMLTestURLList(graph_url, builder_results[REGRESS])
      msg += '<strong>%s</strong>: %s.\n' % (PERF_REGRESS, ', '.join(test_urls))
    # Add improvement HTML links.
    if builder_results[IMPROVE]:
      test_urls = CreateHTMLTestURLList(graph_url,
                                        builder_results[IMPROVE])
      msg += '<strong>%s</strong>: %s.\n' % (PERF_IMPROVE, ', '.join(test_urls))
    return msg or 'No perf results.\n'
  def BuildEmailObject(self, email_subject, html_content, builder_name,
                       build_status, step_name):
    """Creates an email object ready to be sent."""
    m = MIMEMultipart('alternative')
    m.attach(MIMEText(html_content, 'html', 'iso-8859-1'))
    m['Date'] = formatdate(localtime=True)
    m['Subject'] = email_subject
    m['From'] = self.fromaddr
    if self.reply_to:
      m['Reply-To'] = self.reply_to
    recipients = list(self.extraRecipients[:])
    dl = []
    # Resolve interested users to addresses asynchronously.
    if self.sendToInterestedUsers and self.lookup:
      for u in build_status.getInterestedUsers():
        d = defer.maybeDeferred(self.lookup.getAddress, u)
        d.addCallback(recipients.append)
        dl.append(d)
    defered_object = defer.DeferredList(dl)
    defered_object.addCallback(self._gotRecipients, recipients, m)
    defered_object.addCallback(self.getFinishedMessage, builder_name,
                               build_status, step_name)
    return defered_object
  def GenStepBox(self, builder_name, build_status, step_name):
    """Generates a HTML styled summary box for one step."""
    waterfall_url = self.master_status.getBuildbotURL()
    styles = dict(build_utils.DEFAULT_STYLES)
    builder_results = self.GetEmailResults(builder_name)
    # Improvements without regressions are rendered with the success style.
    if builder_results[IMPROVE] and not builder_results[REGRESS]:
      styles['warnings'] = styles['success']
    return build_utils.EmailableBuildTable_bb8(build_status, waterfall_url,
                                               styles=styles,
                                               step_names=[step_name])
  def SetBuilderGraphURL(self, step_name, build_status):
    """Stores the graph URL used in emails for this builder."""
    builder_name = build_status.getBuilder().getName()
    builder_results = self.GetEmailResults(builder_name)
    graph_url = GetGraphURL(step_name, build_status)
    latest_revision = build_utils.getLatestRevision(build_status)
    if latest_revision:
      graph_url = SetQueryParameter(graph_url, 'rev', latest_revision)
    builder_results[GRAPH_URL] = graph_url
def GetStepByName(build_status, step_name):
  """Returns the first step in build_status named step_name, or None."""
  matching = (step for step in build_status.getSteps()
              if step.getName() == step_name)
  return next(matching, None)
def GetGraphURL(step_name, build_status):
  """Returns the graph result's URL from the step with step_name.
  Args:
    step_name: The name of the step to get the URL from.
    build_status: The build status containing all steps in this build.
  Return:
    A string URL for the results graph page in the status step, with a
    'history' query parameter added, or None when no report link exists.
  """
  step = GetStepByName(build_status, step_name)
  step_urls = step.getURLs() if step else None
  for name, target in (step_urls or {}).iteritems():
    # Only the link pointing at the results report page is relevant.
    if 'report.html' not in target:
      continue
    PerfLog('Found graph URL %s %s ' % (name, target))
    return SetQueryParameter(target, 'history', 150)
  PerfLog('Could not find graph URL, step_name: %s.' % step_name)
  return None
def CreateHTMLTestURLList(graph_url, test_names):
  """Creates a list of href HTML graph links for each test name result.
  Args:
    graph_url: The main result page URL.
    test_names: A list of test names ('graph' or 'graph/trace') that should
        be included in the email.
  Return:
    A list of '<a href="...">name</a>' strings, one per test name, each
    pointing at graph_url with the 'graph' (and, when present, 'trace')
    query parameters filled in.
  """
  def _trace_url(test_name):
    # 'graph/trace' -> set graph=<graph>, then append trace=<trace>.
    segments = test_name.split('/')
    url = SetQueryParameter(graph_url, 'graph', segments[0])
    if len(segments) > 1:
      url = SetQueryParameter(url, 'trace', segments[1], append_param=True)
    return url
  return ['<a href="%s">%s</a>' % (_trace_url(name), name)
          for name in test_names]
def GetNewBuilderResult():
  """Returns a fresh per-builder email-result record."""
  record = {key: [] for key in (REGRESS, IMPROVE)}
  record[EMAIL_TIME] = None
  record[GRAPH_URL] = ''
  return record
def SetQueryParameter(url, param_name, param_value, append_param=False):
  """Returns a url with the parameter value pair updated or added.
  If append_param=True then the URL will append a new param value to the URL.
  For a falsy url, only the 'name=value' pair itself is returned.
  """
  if not url:
    return '%s=%s' % (param_name, param_value)
  parts = urlsplit(url)
  params = parse_qs(parts.query)
  existing = params.get(param_name)
  if append_param and existing is not None:
    existing.append(param_value)
  else:
    params[param_name] = [param_value]
  rebuilt_query = urlencode(params, doseq=True)
  return urlunsplit((parts.scheme, parts.netloc, parts.path,
                     rebuilt_query, parts.fragment))
| bsd-3-clause |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/encodings/cp866.py | 1 | 32712 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
""" # "
import codecs
# ## Codec APIs
class Codec(codecs.Codec):
    """Stateless CP866 codec backed by this module's charmap tables."""
    def encode(self, input, errors='strict'):
        encoded, consumed = codecs.charmap_encode(input, errors, encoding_map)
        return encoded, consumed
    def decode(self, input, errors='strict'):
        decoded, consumed = codecs.charmap_decode(input, errors, decoding_table)
        return decoded, consumed
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP866 encoder; charmap encoding keeps no state."""
    def encode(self, input, final=False):
        encoded, _ = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP866 decoder; charmap decoding keeps no state."""
    def decode(self, input, final=False):
        decoded, _ = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    # The charmap codec is stateless, so mixing in Codec's encode() is all
    # the stream writer needs.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # The charmap codec is stateless, so mixing in Codec's decode() is all
    # the stream reader needs.
    pass
### encodings module API
def getregentry():
    """Return the codecs registry entry describing the cp866 codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp866',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Byte -> Unicode overrides for the upper half (0x80-0xFF); bytes 0x00-0x7F
# decode as themselves via the identity dict.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
    0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
    0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
    0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
    0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
    0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
    0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
    0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
    0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
    0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
    0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
    0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
    0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
    0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
    0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
    0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
    0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
    0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
    0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
    0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
    0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
    0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
    0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
    0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
    0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
    0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
    0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
    0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
    0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
    0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
    0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
    0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
    0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
    0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
    0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
    0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
    0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
    0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
    0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
    0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
    0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
    0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
    0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
    0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
    0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
    0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
    0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
    0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
    0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
    0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
    0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
    0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
    0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
    0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
    0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
    0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
    0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
    0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
    0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
    0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
    0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
    0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
    0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
    0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
    0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
    0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
    0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
    0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
    0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
    0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
    0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x2116, # NUMERO SIGN
    0x00fd: 0x00a4, # CURRENCY SIGN
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
u'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
u'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
u'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
u'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
u'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u2116' # 0x00fc -> NUMERO SIGN
u'\xa4' # 0x00fd -> CURRENCY SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Inverse of ``decoding_table`` above: maps each supported Unicode code
# point to its single CP866 byte value.  0x00-0x7F are the ASCII identity
# mappings; 0x80-0xFF carry the Cyrillic letters, box-drawing characters,
# shade blocks and a few symbols of the code page.
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00fd, # CURRENCY SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
0x2116: 0x00fc, # NUMERO SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
mxia/engine | build/apply_locales.py | 295 | 1455 | #!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
import sys
import optparse
def main(argv):
    """Expand a GYP format string once per locale.

    ``argv`` is expected to look like ``sys.argv``: argv[0] is the program
    name, argv[1] the format string containing the literal ``ZZLOCALE``,
    and argv[2:] the locales to substitute.  Prints the space-separated,
    single-quoted expansions and returns None on success; prints an error
    and returns 1 on bad usage.
    """
    parser = optparse.OptionParser()
    usage = 'usage: %s [options ...] format_string locale_list'
    parser.set_usage(usage.replace('%s', '%prog'))
    parser.add_option('-d', dest='dash_to_underscore', action="store_true",
                      default=False,
                      help='map "en-US" to "en" and "-" to "_" in locales')

    (options, arglist) = parser.parse_args(argv)

    # arglist[0] is the program name, so at least two real arguments
    # (format string + one locale) are required.
    if len(arglist) < 3:
        # print() with a single argument behaves identically under
        # Python 2 and 3, unlike the former "print 'msg'" statement.
        print('ERROR: need string and list of locales')
        return 1

    str_template = arglist[1]
    locales = arglist[2:]

    results = []
    for locale in locales:
        # For Cocoa to find the locale at runtime, it needs to use '_' instead
        # of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
        # simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
        if options.dash_to_underscore:
            if locale == 'en-US':
                locale = 'en'
            locale = locale.replace('-', '_')
        results.append(str_template.replace('ZZLOCALE', locale))

    # Quote each element so filename spaces don't mess up GYP's attempt to parse
    # it into a list.
    print(' '.join(["'%s'" % x for x in results]))


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| bsd-3-clause |
lasr/orbital_elements | mee/constant_thrust.py | 1 | 1672 | import numpy as np
from orbital_elements.mee.gve import GVE
__author__ = "Nathan I. Budd"
__email__ = "nibudd@gmail.com"
__copyright__ = "Copyright 2017, LASR Lab"
__license__ = "MIT"
__version__ = "0.1"
__status__ = "Production"
__date__ = "19 Mar 2017"
class ConstantThrust(object):
    """Constant LVLH acceleration expressed as MEE time derivatives.

    Attributes:
        u: ndarray
            3-element array giving the LVLH acceleration components in the
            radial, theta, and normal directions.
        mu: float, optional
            Standard Gravitational Parameter. Defaults to 1.0, the standard
            value in canonical units.
    """

    def __init__(self, u, mu=1.0):
        # Store the thrust as a (1, 3, 1) column so it can be tiled and
        # matrix-multiplied against the GVE matrices sample-by-sample.
        self.u = u.reshape((1, 3, 1))
        self.mu = mu

    def __call__(self, T, X):
        """Calculate constant acceleration as MEE time derivatives.

        Args:
            T: ndarray
                (m, 1) array of times.
            X: ndarray
                (m, 6) array of modified equinoctial elements ordered as
                (p, f, g, h, k, L): semi-latus rectum, the two eccentricity
                vector components in the perifocal frame, the two ascending
                node vector components in the equinoctial frame, and the
                true longitude.

        Returns:
            Xdot: ndarray
                (m, 6) array of state derivatives.
        """
        num_samples = T.shape[0]
        # One copy of the constant thrust column per sample.
        thrust_cols = np.tile(self.u, (num_samples, 1, 1))
        gve_mats = GVE()(T, X)
        return (gve_mats @ thrust_cols).reshape((num_samples, 6))
| mit |
MrSenko/Kiwi | tcms/management/tests/test_admin.py | 2 | 1578 | from django.db import connection
from django.test.utils import CaptureQueriesContext
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from tcms.management.models import Component
from tcms.tests import LoggedInTestCase
from tcms.tests.factories import BuildFactory
from tcms.utils.permissions import initiate_user_with_default_setups
class TestComponentAdmin(LoggedInTestCase):
    """Verify the Component admin changelist uses select_related queries."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        initiate_user_with_default_setups(cls.tester)

    def test_generated_database_queries(self):
        # The SQL we expect the changelist to have issued: a single
        # select_related join instead of per-row lookups.
        queryset = Component.objects.select_related("product", "initial_owner")
        expected_query = str(queryset.order_by("name", "-id").query)

        with CaptureQueriesContext(connection) as context:
            self.client.get(reverse("admin:management_component_changelist"))

        executed_sql = [query["sql"] for query in context.captured_queries]
        if expected_query not in executed_sql:
            self.fail("Component select related query not found.")
class TestBuildAdmin(LoggedInTestCase):
    """Smoke-test the Build admin changelist rendering."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        initiate_user_with_default_setups(cls.tester)
        cls.build = BuildFactory()

    def test_changelist_view_product_name(self):
        changelist_url = reverse("admin:management_build_changelist")
        response = self.client.get(changelist_url)
        # Both the column header and the build's product should render.
        for expected in (_("Product"), self.build.version.product):
            self.assertContains(response, expected)
| gpl-2.0 |
AAROC/invenio | invenio/utils/orcid.py | 18 | 2072 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import subprocess
from json import json
class OrcidSearch:
    """Thin client for the ORCID sandbox public bio-search API.

    ``search_authors`` fetches and caches the raw JSON response in
    ``self.authorsDict``; ``get_authors_names`` extracts display names
    from that cached response.
    """

    def search_authors(self, query):
        """Run a bio search for ``query`` and cache the parsed JSON response."""
        # ORCID's search endpoint separates query terms with '+'.
        query = query.replace(" ", "+")
        # FIXME: Don't create a process to do this!  Shelling out to curl
        # with shell=True and an unescaped query string is fragile and
        # injection-prone; a Python HTTP client should be used instead.
        p = subprocess.Popen("curl -H 'Accept: application/orcid+json' \
            'http://pub.sandbox-1.orcid.org/search/orcid-bio?q=" + \
            query + "&start=0&rows=10'", \
            shell=True, \
            stdout=subprocess.PIPE, \
            stderr=subprocess.STDOUT)
        json_results = ""
        for line in p.stdout.readlines():
            # Keep only the last line of output: curl's JSON payload.
            json_results = line
        # Function-scope import: the module-level ``from json import json``
        # raises ImportError (the stdlib json module has no ``json``
        # attribute), so bind the real module here.
        import json
        self.authorsDict = json.loads(json_results)

    def get_authors_names(self):
        """Return "family given" names from the last search, or [] if the
        cached response is missing or malformed."""
        author_names = []
        try:
            results = self.authorsDict['orcid-search-results']['orcid-search-result']
            for author in results:
                details = author['orcid-profile']['orcid-bio']['personal-details']
                given_name = details['given-names']['value']
                family_name = details['family-name']['value']
                author_names.append(family_name + " " + given_name)
            return author_names
        except KeyError:
            # Any missing key (empty result set, schema change) yields no names.
            return []
| gpl-2.0 |
google/autocjk | src/utils/font_helper.py | 1 | 2696 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for handling font file access."""
from typing import Text, List, Iterable
from fontTools import ttLib
from fontTools.ttLib import ttFont
from src.utils import region as region_lib
_RANGES = [
# (1) CJK Unified Ideographs
(0x4E00, 0x9FFF),
# (2) CJK Unified Ideographs Extension A
(0x3400, 0x4DBF),
# (3) CJK Unified Ideographs Extension B
(0x20000, 0x2A5DF),
# (4) CJK Unified Ideographs Extension C
(0x2A700, 0x2B73F),
# (5) CJK Unified Ideographs Extension D
(0x2B740, 0x2B81F),
# (6) CJK Unified Ideographs Extension E
(0x2B820, 0x2CEAF),
# (7) CJK Unified Ideographs Extension F
(0x2CEB0, 0x2EBEF),
# (8) CJK Unified Ideographs Extension G
(0x30000, 0x3134F),
]
def _fast_is_cjk(x) -> bool:
"""Given a character ordinal, returns whether or not it is CJK."""
return any(l <= x <= r for l, r in _RANGES)
def _cjk_characters() -> Iterable[Text]:
"""An iterator of all CJK codepoints."""
for l, r in _RANGES:
for i in range(l, r):
yield chr(i)
class FontHelper:
    """Wraps a fontTools TTFont to answer CJK coverage queries.

    Caches the font's glyph set and best cmap once at construction so the
    per-character queries below are cheap dict/set lookups.
    """

    def __init__(self, font: ttLib.TTFont, input_region: region_lib.Region):
        self.font = font
        self.region = input_region
        # Prefer the CFF glyph set when the font has one (OTF fonts).
        self.glyph_set = self.font.getGlyphSet(preferCFF=True)
        self._cmap = self.font.getBestCmap()

    def has_ttglyph(self, character: Text) -> bool:
        """Returns True if self.font has a rendering for this character."""
        if len(character) > 1:
            # Multi-codepoint input (e.g. an Ideographic Description
            # Sequence): assume the font has no precomposed rendering.
            return False
        codepoint = ord(character)
        # The cmap may reference glyph names missing from the glyph set,
        # so both lookups are required.
        return codepoint in self._cmap and self._cmap[codepoint] in self.glyph_set

    def unknown_characters(self) -> Iterable[Text]:
        """Returns an iterator of CJK ideographic characters NOT in this font."""
        for c in _cjk_characters():
            if ord(c) not in self._cmap:
                yield c

    def known_characters(self) -> List[Text]:
        """Returns the list of CJK ideographic characters in this font."""
        return list(map(chr, filter(_fast_is_cjk, self._cmap.keys())))
| apache-2.0 |
Kast0rTr0y/ansible | lib/ansible/modules/cloud/amazon/ec2_elb_lb.py | 14 | 53190 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: false
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
- An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example )
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
import boto.vpc
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import time
import random
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
    """Return a boto VPC connection for *region*, failing the Ansible module
    (via fail_json, which exits) on authentication/connection errors."""
    try:
        return connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
# Maximum retry count used with the @_throttleable_operation decorator.
_THROTTLING_RETRIES = 5
class ElbManager(object):
    """Handles ELB creation and destruction"""
    def __init__(self, module, name, listeners=None, purge_listeners=None,
                 zones=None, purge_zones=None, security_group_ids=None,
                 health_check=None, subnets=None, purge_subnets=None,
                 scheme="internet-facing", connection_draining_timeout=None,
                 idle_timeout=None,
                 cross_az_load_balancing=None, access_logs=None,
                 stickiness=None, wait=None, wait_timeout=None, tags=None,
                 region=None,
                 instance_ids=None, purge_instance_ids=None, **aws_connect_params):
        """Record the desired ELB configuration and open AWS connections.

        module -- the AnsibleModule (used for fail_json / boolean coercion).
        The remaining arguments mirror the module parameters one-to-one;
        **aws_connect_params is passed through to boto's connect_to_aws.
        """
        self.module = module
        self.name = name
        self.listeners = listeners
        self.purge_listeners = purge_listeners
        self.instance_ids = instance_ids
        self.purge_instance_ids = purge_instance_ids
        self.zones = zones
        self.purge_zones = purge_zones
        self.security_group_ids = security_group_ids
        self.health_check = health_check
        self.subnets = subnets
        self.purge_subnets = purge_subnets
        self.scheme = scheme
        self.connection_draining_timeout = connection_draining_timeout
        self.idle_timeout = idle_timeout
        self.cross_az_load_balancing = cross_az_load_balancing
        self.access_logs = access_logs
        self.stickiness = stickiness
        self.wait = wait
        self.wait_timeout = wait_timeout
        self.tags = tags
        self.aws_connect_params = aws_connect_params
        self.region = region
        # Mutated by the _set_* helpers as changes are applied.
        self.changed = False
        # One of 'gone', 'ok', 'created', 'deleted'; _get_elb() below may
        # flip it to 'ok' when the named ELB already exists.
        self.status = 'gone'
        # Connections are opened eagerly; failures exit via fail_json.
        self.elb_conn = self._get_elb_connection()
        self.elb = self._get_elb()
        self.ec2_conn = self._get_ec2_connection()
    @_throttleable_operation(_THROTTLING_RETRIES)
    def ensure_ok(self):
        """Create the ELB if absent, otherwise converge it to the desired state."""
        if not self.elb:
            # Zones and listeners will be added at creation
            self._create_elb()
        else:
            self._set_zones()
            self._set_security_groups()
            self._set_elb_listeners()
            self._set_subnets()
        self._set_health_check()
        # boto has introduced support for some ELB attributes in
        # different versions, so we check first before trying to
        # set them to avoid errors
        if self._check_attribute_support('connection_draining'):
            self._set_connection_draining_timeout()
        if self._check_attribute_support('connecting_settings'):
            self._set_idle_timeout()
        if self._check_attribute_support('cross_zone_load_balancing'):
            self._set_cross_az_load_balancing()
        if self._check_attribute_support('access_log'):
            self._set_access_log()
        # add sticky options
        self.select_stickiness_policy()
        # ensure backend server policies are correct
        self._set_backend_policies()
        # set/remove instance ids
        self._set_instance_ids()
        self._set_tags()
    def ensure_gone(self):
        """Destroy the ELB, optionally waiting for it (and its ENIs) to vanish."""
        if self.elb:
            self._delete_elb()
        if self.wait:
            elb_removed = self._wait_for_elb_removed()
            # Unfortunately even though the ELB itself is removed quickly
            # the interfaces take longer so reliant security groups cannot
            # be deleted until the interface has registered as removed.
            elb_interface_removed = self._wait_for_elb_interface_removed()
            if not (elb_removed and elb_interface_removed):
                self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'proxy_policy': self._get_proxy_protocol_policy(),
'backends': self._get_backend_policies(),
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
info['tags'] = self.tags
return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
result = self.elb_conn.get_all_lb_attributes(self.name)
except (boto.exception.BotoServerError, StandardError) as e:
if "LoadBalancerNotFound" in e.code:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name) })
for x in range(0, max_retries):
for interface in elb_interfaces:
try:
result = self.ec2_conn.get_all_network_interfaces(interface.id)
if result == []:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except (boto.exception.BotoServerError, StandardError) as e:
if 'InvalidNetworkInterfaceID' in e.code:
status_achieved = True
break
else:
self.module.fail_json(msg=str(e))
return status_achieved
    @_throttleable_operation(_THROTTLING_RETRIES)
    def _get_elb(self):
        """Return the load balancer named self.name, or None if it does not
        exist.  Also flips self.status to 'ok' when found."""
        elbs = self.elb_conn.get_all_load_balancers()
        for elb in elbs:
            if self.name == elb.name:
                self.status = 'ok'
                return elb
    def _get_elb_connection(self):
        """Open a boto ELB connection for self.region, or exit via fail_json."""
        try:
            return connect_to_aws(boto.ec2.elb, self.region,
                                  **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))
    def _get_ec2_connection(self):
        """Open a boto EC2 connection for self.region, or exit via fail_json."""
        try:
            return connect_to_aws(boto.ec2, self.region,
                                  **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError) as e:
            self.module.fail_json(msg=str(e))
    @_throttleable_operation(_THROTTLING_RETRIES)
    def _delete_elb(self):
        """Delete the load balancer and mark the manager state accordingly."""
        # True if succeeds, exception raised if not
        result = self.elb_conn.delete_load_balancer(name=self.name)
        if result:
            self.changed = True
            self.status = 'deleted'
    def _create_elb(self):
        """Create the load balancer with its initial zones, listeners and subnets."""
        listeners = [self._listener_as_tuple(l) for l in self.listeners]
        self.elb = self.elb_conn.create_load_balancer(name=self.name,
                                                      zones=self.zones,
                                                      security_groups=self.security_group_ids,
                                                      complex_listeners=listeners,
                                                      subnets=self.subnets,
                                                      scheme=self.scheme)
        if self.elb:
            # HACK: Work around a boto bug in which the listeners attribute is
            # always set to the listeners argument to create_load_balancer, and
            # not the complex_listeners
            # We're not doing a self.elb = self._get_elb here because there
            # might be eventual consistency issues and it doesn't necessarily
            # make sense to wait until the ELB gets returned from the EC2 API.
            # This is necessary in the event we hit the throttling errors and
            # need to retry ensure_ok
            # See https://github.com/boto/boto/issues/3526
            self.elb.listeners = self.listeners
            self.changed = True
            self.status = 'created'
    def _create_elb_listeners(self, listeners):
        """Takes a list of listener tuples and creates them"""
        # True if succeeds, exception raised if not
        self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
                                                                    complex_listeners=listeners)
    def _delete_elb_listeners(self, listeners):
        """Takes a list of listener tuples and deletes them from the elb"""
        # Listener tuples start with the load balancer port (see
        # _listener_as_tuple), which is what the delete call keys on.
        ports = [l[0] for l in listeners]
        # True if succeeds, exception raised if not
        self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
                                                                    ports)
    def _set_elb_listeners(self):
        """
        Creates listeners specified by self.listeners; overwrites existing
        listeners on these ports; removes extraneous listeners
        """
        listeners_to_add = []
        listeners_to_remove = []
        listeners_to_keep = []
        # Check for any listeners we need to create or overwrite
        for listener in self.listeners:
            listener_as_tuple = self._listener_as_tuple(listener)
            # First we loop through existing listeners to see if one is
            # already specified for this port
            existing_listener_found = None
            for existing_listener in self.elb.listeners:
                # Since ELB allows only one listener on each incoming port, a
                # single match on the incoming port is all we're looking for
                if existing_listener[0] == int(listener['load_balancer_port']):
                    existing_listener_found = self._api_listener_as_tuple(existing_listener)
                    break
            if existing_listener_found:
                # Does it match exactly?
                if listener_as_tuple != existing_listener_found:
                    # The ports are the same but something else is different,
                    # so we'll remove the existing one and add the new one
                    listeners_to_remove.append(existing_listener_found)
                    listeners_to_add.append(listener_as_tuple)
                else:
                    # We already have this listener, so we're going to keep it
                    listeners_to_keep.append(existing_listener_found)
            else:
                # We didn't find an existing listener, so just add the new one
                listeners_to_add.append(listener_as_tuple)
        # Check for any extraneous listeners we need to remove, if desired
        if self.purge_listeners:
            for existing_listener in self.elb.listeners:
                existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
                if existing_listener_tuple in listeners_to_remove:
                    # Already queued for removal
                    continue
                if existing_listener_tuple in listeners_to_keep:
                    # Keep this one around
                    continue
                # Since we're not already removing it and we don't need to keep
                # it, let's get rid of it
                listeners_to_remove.append(existing_listener_tuple)
        # Removals must happen before adds so a changed listener's port is free.
        if listeners_to_remove:
            self._delete_elb_listeners(listeners_to_remove)
        if listeners_to_add:
            self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
    def _enable_zones(self, zones):
        """Enable the given availability zones on the ELB; fail the module on
        API errors."""
        try:
            self.elb.enable_zones(zones)
        except boto.exception.BotoServerError as e:
            if "Invalid Availability Zone" in e.error_message:
                self.module.fail_json(msg=e.error_message)
            else:
                self.module.fail_json(msg="an unknown server error occurred, please try again later")
        self.changed = True
    def _disable_zones(self, zones):
        """Disable the given availability zones on the ELB; fail the module on
        API errors."""
        try:
            self.elb.disable_zones(zones)
        except boto.exception.BotoServerError as e:
            if "Invalid Availability Zone" in e.error_message:
                self.module.fail_json(msg=e.error_message)
            else:
                self.module.fail_json(msg="an unknown server error occurred, please try again later")
        self.changed = True
    def _attach_subnets(self, subnets):
        """Attach the given subnets to the ELB."""
        self.elb_conn.attach_lb_to_subnets(self.name, subnets)
        self.changed = True
    def _detach_subnets(self, subnets):
        """Detach the given subnets from the ELB."""
        self.elb_conn.detach_lb_from_subnets(self.name, subnets)
        self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
    def _set_security_groups(self):
        """Apply self.security_group_ids when they differ from the current set."""
        if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
            self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
            self.changed = True
    def _set_health_check(self):
        """Set health check values on ELB as needed"""
        if self.health_check:
            # This just makes it easier to compare each of the attributes
            # and look for changes. Keys are attributes of the current
            # health_check; values are desired values of new health_check
            health_check_config = {
                "target": self._get_health_check_target(),
                "timeout": self.health_check['response_timeout'],
                "interval": self.health_check['interval'],
                "unhealthy_threshold": self.health_check['unhealthy_threshold'],
                "healthy_threshold": self.health_check['healthy_threshold'],
            }
            update_health_check = False
            # The health_check attribute is *not* set on newly created
            # ELBs! So we have to create our own.
            if not self.elb.health_check:
                self.elb.health_check = HealthCheck()
            # Copy over only the attributes that actually differ so we can
            # tell whether an API update is required at all.
            for attr, desired_value in health_check_config.items():
                if getattr(self.elb.health_check, attr) != desired_value:
                    setattr(self.elb.health_check, attr, desired_value)
                    update_health_check = True
            if update_health_check:
                self.elb.configure_health_check(self.elb.health_check)
                self.changed = True
    def _check_attribute_support(self, attr):
        """Return True when the installed boto exposes the given ELB attribute
        (older boto releases predate some of them)."""
        return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
    def _set_cross_az_load_balancing(self):
        """Enable or disable cross-zone load balancing to match the request."""
        attributes = self.elb.get_attributes()
        if self.cross_az_load_balancing:
            if not attributes.cross_zone_load_balancing.enabled:
                self.changed = True
            attributes.cross_zone_load_balancing.enabled = True
        else:
            if attributes.cross_zone_load_balancing.enabled:
                self.changed = True
            attributes.cross_zone_load_balancing.enabled = False
        # The attribute is (re)applied unconditionally; changed only flips
        # when the previous value differed.
        self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
                                          attributes.cross_zone_load_balancing.enabled)
    def _set_access_log(self):
        """Configure (or disable) S3 access logging on the ELB."""
        attributes = self.elb.get_attributes()
        if self.access_logs:
            if 's3_location' not in self.access_logs:
                self.module.fail_json(msg='s3_location information required')
            # Desired state of the AccessLog attribute, with defaults for the
            # optional prefix and emit interval.
            access_logs_config = {
                "enabled": True,
                "s3_bucket_name": self.access_logs['s3_location'],
                "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
                "emit_interval": self.access_logs.get('interval', 60),
            }
            update_access_logs_config = False
            for attr, desired_value in access_logs_config.items():
                if getattr(attributes.access_log, attr) != desired_value:
                    setattr(attributes.access_log, attr, desired_value)
                    update_access_logs_config = True
            if update_access_logs_config:
                self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
                self.changed = True
        elif attributes.access_log.enabled:
            # No access_logs requested but logging is on: turn it off.
            attributes.access_log.enabled = False
            self.changed = True
            self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
    def _set_connection_draining_timeout(self):
        """Enable connection draining with the requested timeout, or disable it
        when no timeout was requested."""
        attributes = self.elb.get_attributes()
        if self.connection_draining_timeout is not None:
            if not attributes.connection_draining.enabled or \
                    attributes.connection_draining.timeout != self.connection_draining_timeout:
                self.changed = True
            attributes.connection_draining.enabled = True
            attributes.connection_draining.timeout = self.connection_draining_timeout
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
        else:
            if attributes.connection_draining.enabled:
                self.changed = True
            attributes.connection_draining.enabled = False
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
    def _set_idle_timeout(self):
        """Apply the requested idle timeout (ConnectingSettings) if it differs."""
        attributes = self.elb.get_attributes()
        if self.idle_timeout is not None:
            if attributes.connecting_settings.idle_timeout != self.idle_timeout:
                self.changed = True
            attributes.connecting_settings.idle_timeout = self.idle_timeout
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
    def _policy_name(self, policy_type):
        """Derive a policy name from this module's file name (underscores
        replaced with dashes) plus the policy type suffix."""
        return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
    def _create_policy(self, policy_param, policy_meth, policy):
        """Invoke the named elb_conn create-policy method (e.g.
        create_lb_cookie_stickiness_policy) with its parameter value."""
        getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
    def _delete_policy(self, elb_name, policy):
        """Delete the named policy from the given ELB."""
        self.elb_conn.delete_lb_policy(elb_name, policy)
    def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
        """Replace a policy by deleting and re-creating it with the new value
        (the ELB API has no in-place policy update)."""
        self._delete_policy(self.elb.name, policy)
        self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=[]):
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
    def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
        """Create the stickiness policy, or recreate it if one with the same
        name exists with a different parameter value, then bind it to the
        ELB's HTTP(S) listeners."""
        for p in getattr(elb_info.policies, policy_attrs['attr']):
            if str(p.__dict__['policy_name']) == str(policy[0]):
                # Same policy name already present: recreate only when its
                # parameter (expiration period / cookie name) changed.
                if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
                    self._set_listener_policy(listeners_dict)
                    self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                    self.changed = True
                break
        else:
            # for/else: no policy with that name exists yet, so create it.
            self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
            self.changed = True
        self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
if self.stickiness:
if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
d = {}
for listener in elb_info.listeners:
d[listener[0]] = listener[2]
listeners_dict = d
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']) == True:
if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')
expiration = self.stickiness['expiration'] if self.stickiness['expiration'] is not 0 else None
policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': expiration
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
else:
self.changed = False
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']) == True:
if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')
policy_attrs = {
'type': policy_type,
'attr': 'app_cookie_stickiness_policies',
'method': 'create_app_cookie_stickiness_policy',
'dict_key': 'cookie_name',
'param_value': self.stickiness['cookie']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
else:
self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
    def _set_backend_policies(self):
        """Sets policies for all backends"""
        ensure_proxy_protocol = False
        replace = []
        backend_policies = self._get_backend_policies()
        # Find out what needs to be changed
        for listener in self.listeners:
            want = False
            if 'proxy_protocol' in listener and listener['proxy_protocol']:
                ensure_proxy_protocol = True
                want = True
            if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
                # Policy currently attached but no longer wanted: clear it.
                if not want:
                    replace.append({'port': listener['instance_port'], 'policies': []})
            elif want:
                # Policy wanted but not attached yet.
                replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
        # enable or disable proxy protocol
        if ensure_proxy_protocol:
            self._set_proxy_protocol_policy()
        # Make the backend policies so
        for item in replace:
            self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
            self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
    def _set_proxy_protocol_policy(self):
        """Install a proxy protocol policy if needed"""
        proxy_policy = self._get_proxy_protocol_policy()
        if proxy_policy is None:
            self.elb_conn.create_lb_policy(
                self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
            )
            self.changed = True
        # TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
    def _get_instance_ids(self):
        """Get the current list of instance ids installed in the elb"""
        instances = []
        if self.elb.instances is not None:
            for instance in self.elb.instances:
                instances.append(instance.id)
        return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
    def _set_tags(self):
        """Add/Delete tags"""
        if self.tags is None:
            return
        params = {'LoadBalancerNames.member.1': self.name}
        tagdict = dict()
        # get the current list of tags from the ELB, if ELB exists
        if self.elb:
            current_tags = self.elb_conn.get_list('DescribeTags', params,
                                                  [('member', Tag)])
            tagdict = dict((tag.Key, tag.Value) for tag in current_tags
                           if hasattr(tag, 'Key'))
        # Add missing tags
        # set-difference of (key, value) pairs finds tags that are new or
        # whose value changed.
        dictact = dict(set(self.tags.items()) - set(tagdict.items()))
        if dictact:
            for i, key in enumerate(dictact):
                params['Tags.member.%d.Key' % (i + 1)] = key
                params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
            self.elb_conn.make_request('AddTags', params)
            self.changed=True
        # Remove extra tags
        dictact = dict(set(tagdict.items()) - set(self.tags.items()))
        if dictact:
            for i, key in enumerate(dictact):
                params['Tags.member.%d.Key' % (i + 1)] = key
            self.elb_conn.make_request('RemoveTags', params)
            self.changed=True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
    """Module entry point: parse arguments and create/update/delete the ELB."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        name={'required': True},
        listeners={'default': None, 'required': False, 'type': 'list'},
        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
        instance_ids={'default': None, 'required': False, 'type': 'list'},
        purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
        zones={'default': None, 'required': False, 'type': 'list'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list'},
        security_group_names={'default': None, 'required': False, 'type': 'list'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
        scheme={'default': 'internet-facing', 'required': False},
        connection_draining_timeout={'default': None, 'required': False},
        idle_timeout={'default': None, 'required': False},
        cross_az_load_balancing={'default': None, 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'},
        access_logs={'default': None, 'required': False, 'type': 'dict'},
        wait={'default': False, 'type': 'bool', 'required': False},
        wait_timeout={'default': 60, 'type': 'int', 'required': False},
        tags={'default': None, 'required': False, 'type': 'dict'}
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['security_group_ids', 'security_group_names']]
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
    name = module.params['name']
    state = module.params['state']
    listeners = module.params['listeners']
    purge_listeners = module.params['purge_listeners']
    instance_ids = module.params['instance_ids']
    purge_instance_ids = module.params['purge_instance_ids']
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    access_logs = module.params['access_logs']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']
    connection_draining_timeout = module.params['connection_draining_timeout']
    idle_timeout = module.params['idle_timeout']
    cross_az_load_balancing = module.params['cross_az_load_balancing']
    stickiness = module.params['stickiness']
    wait = module.params['wait']
    wait_timeout = module.params['wait_timeout']
    tags = module.params['tags']
    if state == 'present' and not listeners:
        module.fail_json(msg="At least one listener is required for ELB creation")
    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
    if wait_timeout > 600:
        module.fail_json(msg='wait_timeout maximum is 600 seconds')
    # Resolve security group names to ids, scoped to the subnets' VPC if any.
    if security_group_names:
        security_group_ids = []
        try:
            ec2 = ec2_connect(module)
            if subnets: # We have at least one subnet, ergo this is a VPC
                vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
                vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
                filters = {'vpc_id': vpc_id}
            else:
                filters = None
            grp_details = ec2.get_all_security_groups(filters=filters)
            for group_name in security_group_names:
                if isinstance(group_name, basestring):
                    group_name = [group_name]
                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
                security_group_ids.extend(group_id)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg = str(e))
    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, idle_timeout,
                         cross_az_load_balancing,
                         access_logs, stickiness, wait, wait_timeout, tags,
                         region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
                         **aws_connect_params)
    # check for unsupported attributes for this version of boto
    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
    if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
        module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
    if state == 'present':
        elb_man.ensure_ok()
    elif state == 'absent':
        elb_man.ensure_gone()
    ansible_facts = {'ec2_elb': 'info'}
    ec2_facts_result = dict(changed=elb_man.changed,
                            elb=elb_man.get_info(),
                            ansible_facts=ansible_facts)
    module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Standard Ansible module entry point: the wildcard module_utils imports
# above must execute first so main() has the AnsibleModule machinery.
if __name__ == '__main__':
    main()
| gpl-3.0 |
chitrangpatel/presto | python/binresponses/monte_short.py | 2 | 3593 | from time import clock
from math import *
from Numeric import *
from presto import *
from miscutils import *
from Statistics import *
import Pgplot
# Some admin variables
showplots = 0         # True or false
showsumplots = 0      # True or false
debugout = 0          # True or false
outfiledir = '/home/ransom'
outfilenm = 'monte'
pmass = 1.35                                 # Pulsar mass in solar masses
cmass = {'WD': 0.3, 'NS': 1.35, 'BH': 10.0}  # Companion masses to use
ecc = {'WD': 0.0, 'NS': 0.6, 'BH': 0.6}      # Eccentricities to use
orbsperpt = {'WD': 20, 'NS': 20, 'BH': 20}   # # of orbits to avg per pt
ppsr = [0.002, 0.02, 0.2]                    # Pulsar periods to test

# Simulation parameters
ctype = 'BH'          # The type of binary companion: 'WD', 'NS', or 'BH'
Pb = 7200.0           # Orbital period in seconds
dt = 0.0001           # The duration of each data sample (s)
searchtype = 'short'  # One of 'ffdot', 'sideband', 'short'
Tfft = 60.0           # Length of FFTs in seconds (must evenly divide Pb)
numbetween = 2        # Fourier interpolation factor (bins between bins)

##################################################
# You shouldn't need to edit anyting below here. #
##################################################

# NOTE: backtick repr (`Tfft`) is Python 2 only syntax.
outfilenm = (outfiledir+'/'+outfilenm+
             '_'+searchtype+`Tfft`+'_'+ctype+'.out')
def psrparams_from_list(pplist):
    """Build a presto ``psrparams`` object from a six-element sequence.

    Expected order: pulsar period, orbital period, projected semi-major
    axis, eccentricity, argument of periapsis, time since periapsis.
    """
    period = pplist[0]
    orb_period = pplist[1]
    orb_asini = pplist[2]
    orb_ecc = pplist[3]
    orb_omega = pplist[4]
    orb_time = pplist[5]

    result = psrparams()
    result.p = period
    result.orb.p = orb_period
    result.orb.x = orb_asini
    result.orb.e = orb_ecc
    result.orb.w = orb_omega
    result.orb.t = orb_time
    return result
####################################################################
# Open a file to save each orbit calculation
# Open a file to save each orbit calculation
file = open(outfilenm,'w')

# Number of independent FFT segments covering one orbital period.
numffts = int(Pb / Tfft)
# Fractional orbital phases (T/Pb) at which each FFT segment starts.
TbyPb = (arange(numffts, typecode='d')+1.0)/numffts
# Projected semi-major axis from the binary mass function (inclination 60 deg).
xb = asini_c(Pb, mass_funct2(pmass, cmass[ctype], pi / 3.0))

for pp in ppsr:
    pows = zeros(orbsperpt[ctype], 'd')
    stim = clock()
    # First pass: find the widest binary response over all sampled
    # periapsis angles so every power array can share one width.
    numbins = 0
    for ct in range(orbsperpt[ctype]):
        wb = ct * 180.0 / orbsperpt[ctype]
        psr = psrparams_from_list([pp, Pb, xb, ecc[ctype], wb, 0.0])
        tmpnumbins = 2 * numbetween * bin_resp_halfwidth(psr.p, Pb, psr.orb)
        if tmpnumbins > numbins: numbins = tmpnumbins
    # Powers averaged over orb.t as a function of orb.w
    pwrs_w = zeros((orbsperpt[ctype], numbins), Float32)
    for ct in range(orbsperpt[ctype]):
        wb = ct * 180.0 / orbsperpt[ctype]
        if debugout: print 'wb = '+`wb`
        psr = psrparams_from_list([pp, Pb, xb, ecc[ctype], wb, 0.0])
        # Accumulate the spectral power of the binary response for each
        # FFT segment start time within the orbit.
        for i in range(numffts):
            psr.orb.t = i * Tfft
            tmppwrs = spectralpower(gen_bin_response(0.0, numbetween,
                                                     psr.p, Tfft,
                                                     psr.orb, numbins))
            if debugout: print ' tb = '+`psr.orb.t`+' Max pow = '+\
                  `max(tmppwrs)`
            if showplots:
                Pgplot.plotxy(tmppwrs)
                Pgplot.closeplot()
            pwrs_w[ct] = pwrs_w[ct] + tmppwrs
        if showsumplots:
            Pgplot.plotxy(pwrs_w[ct], title='power(w) averaged over orb.t')
            Pgplot.closeplot()
    # Convert accumulated powers to per-segment averages.
    pwrs_w = pwrs_w / numffts
    # Mean (over periapsis angle) of the peak power per orientation.
    max_avg_pow = average(maximum.reduce(pwrs_w,1))
    if showsumplots:
        Pgplot.plotxy(add.reduce(pwrs_w), title='power(w) averaged over orb.t')
        Pgplot.closeplot()
    tim = clock() - stim
    if debugout:
        print 'Time for this point was ',tim, ' s.'
    # Record: pulsar period, FFT length, samples per FFT, mean peak power.
    file.write('%8.6f %10.5f %10d %13.9f\n' % \
               (pp, Tfft, int(Tfft/dt), max_avg_pow))
    file.flush()
file.close()
| gpl-2.0 |
canadacoin/pasutest | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after a failed/invalid RPC exchange.
ERR_SLEEP = 15
# Upper bound for the nonce scan per getwork (Python 2 long literal).
MAX_NONCE = 1000000L

# Parsed key=value configuration, filled in by the __main__ block below.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-style node (Python 2)."""

    # Monotonically increasing JSON-RPC request id (shared by design).
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header, precomputed once.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # 30 second timeout; `False` disables strict status-line checking.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Perform one JSON-RPC call; return the result, the error object,
        or None on a transport/decode failure (which is printed)."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE: an RPC-level error is returned to the caller, not raised.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch work (data=None) or submit a solved block (data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate an arbitrary integer to its low 32 bits (py2 long literal).
    return x & 0xffffffffL
def bytereverse(x):
    """Swap the byte order of a 32-bit word (endianness flip)."""
    byte0 = x << 24                  # low byte -> high byte
    byte1 = (x << 8) & 0x00ff0000
    byte2 = (x >> 8) & 0x0000ff00
    byte3 = x >> 24                  # high byte -> low byte
    return uint32(byte0 | byte1 | byte2 | byte3)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of a buffer; word order is preserved."""
    swapped = [
        struct.pack('@I', bytereverse(struct.unpack('@I', in_buf[i:i+4])[0]))
        for i in range(0, len(in_buf), 4)
    ]
    return ''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in a buffer."""
    chunks = [in_buf[pos:pos+4] for pos in range(0, len(in_buf), 4)]
    return ''.join(reversed(chunks))
class Miner:
    """One mining worker: fetches getwork, scans nonces, submits solutions.
    Python 2 code: str is bytes, .encode('hex')/decode('hex') codecs."""

    def __init__(self, id):
        self.id = id
        # Adaptive scan limit, retuned each iteration from the hash rate.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for the given hex work data/target.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if the scan range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1] # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        # Range exhausted; `nonce` keeps its final loop value here (py2/py3
        # both leave the loop variable bound after the loop).
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the solved nonce into the original work and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Nonce occupies hex chars [152:160) of the 256-char data field.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One getwork/scan/submit cycle; sleeps and returns on bad work."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Retune the scan length so one iteration lasts ~settings['scantime']
        # seconds, capped just under 2^32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: run a single Miner until killed."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse a simple "key = value" config file into the global settings dict.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for anything the config file did not set.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Config values arrive as strings; coerce the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Despite the names, workers are separate processes, not threads.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1) # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
rryan/django-cms | cms/admin/placeholderadmin.py | 12 | 26205 | # -*- coding: utf-8 -*-
import json
from cms.utils.compat import DJANGO_1_7
from django.conf import settings
from django.conf.urls import url
from django.contrib.admin.helpers import AdminForm
from django.contrib.admin.util import get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.db import router, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseNotFound,
HttpResponseForbidden, HttpResponseRedirect)
from django.shortcuts import render, get_object_or_404
from django.template.defaultfilters import force_escape, escapejs
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
from cms.constants import PLUGIN_COPY_ACTION, PLUGIN_MOVE_ACTION
from cms.exceptions import PluginLimitReached
from cms.models.placeholdermodel import Placeholder
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.utils import copy_plugins, permissions, get_language_from_request, get_cms_setting
from cms.utils.i18n import get_language_list
from cms.utils.plugins import requires_reload, has_reached_plugin_limit, reorder_plugins
from cms.utils.urlutils import admin_reverse
class FrontendEditableAdminMixin(object):
    """ModelAdmin mixin enabling single-field editing from the frontend
    toolbar: adds an ``edit-field`` admin view limited to the fields
    whitelisted in ``frontend_editable_fields``."""

    # Whitelist of model fields editable through the frontend view.
    frontend_editable_fields = []

    def get_urls(self):
        """
        Register the url for the single field edit view
        """
        from cms.urls import SLUG_REGEXP
        info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
        pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
        url_patterns = [
            pat(r'edit-field/(%s)/([a-z\-]+)/$' % SLUG_REGEXP, self.edit_field),
        ]
        return url_patterns + super(FrontendEditableAdminMixin, self).get_urls()

    def _get_object_for_single_field(self, object_id, language):
        # Quick and dirty way to retrieve objects for django-hvad
        # Cleaner implementation will extend this method in a child mixin
        try:
            return self.model.objects.language(language).get(pk=object_id)
        except AttributeError:
            return self.model.objects.get(pk=object_id)

    def edit_field(self, request, object_id, language):
        """Render (GET) or process (POST) a form containing only the
        whitelisted fields named in the ``edit_fields`` query parameter."""
        obj = self._get_object_for_single_field(object_id, language)
        opts = obj.__class__._meta
        saved_successfully = False
        cancel_clicked = request.POST.get("_cancel", False)
        raw_fields = request.GET.get("edit_fields")
        # Only fields explicitly whitelisted on the admin are editable.
        fields = [field for field in raw_fields.split(",") if field in self.frontend_editable_fields]
        if not fields:
            context = {
                'opts': opts,
                'message': force_text(_("Field %s not found")) % raw_fields
            }
            return render(request, 'admin/cms/page/plugin/error_form.html', context)
        if not request.user.has_perm("{0}.change_{1}".format(self.model._meta.app_label,
                                                             self.model._meta.model_name)):
            context = {
                'opts': opts,
                'message': force_text(_("You do not have permission to edit this item"))
            }
            return render(request, 'admin/cms/page/plugin/error_form.html', context)
        # Dynamically creates the form class with only the requested
        # fields enabled
        form_class = self.get_form(request, obj, fields=fields)
        if not cancel_clicked and request.method == 'POST':
            form = form_class(instance=obj, data=request.POST)
            if form.is_valid():
                form.save()
                saved_successfully = True
        else:
            form = form_class(instance=obj)
        admin_form = AdminForm(form, fieldsets=[(None, {'fields': fields})], prepopulated_fields={},
                               model_admin=self)
        media = self.media + admin_form.media
        context = {
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'title': opts.verbose_name,
            'plugin': None,
            'plugin_id': None,
            'adminform': admin_form,
            'add': False,
            'is_popup': True,
            'media': media,
            'opts': opts,
            'change': True,
            'save_as': False,
            'has_add_permission': False,
            'window_close_timeout': 10,
        }
        if cancel_clicked:
            # cancel button was clicked
            context.update({
                'cancel': True,
            })
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        if not cancel_clicked and request.method == 'POST' and saved_successfully:
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        return render(request, 'admin/cms/page/plugin/change_form.html', context)
class PlaceholderAdminMixin(object):
    """ModelAdmin mixin providing the AJAX plugin views (add/edit/copy/
    move/delete/clear) used by the CMS frontend for any model that owns
    placeholders."""

    def get_urls(self):
        """
        Register the plugin specific urls (add/edit/copy/remove/move)
        """
        from cms.urls import SLUG_REGEXP
        info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
        pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
        url_patterns = [
            pat(r'copy-plugins/$', self.copy_plugins),
            pat(r'add-plugin/$', self.add_plugin),
            pat(r'edit-plugin/(%s)/$' % SLUG_REGEXP, self.edit_plugin),
            pat(r'delete-plugin/(%s)/$' % SLUG_REGEXP, self.delete_plugin),
            pat(r'clear-placeholder/(%s)/$' % SLUG_REGEXP, self.clear_placeholder),
            pat(r'move-plugin/$', self.move_plugin),
        ]
        return url_patterns + super(PlaceholderAdminMixin, self).get_urls()

    def has_add_plugin_permission(self, request, placeholder, plugin_type):
        # Requires both the per-plugin-type "add" permission and add
        # permission on the target placeholder.
        if not permissions.has_plugin_permission(request.user, plugin_type, "add"):
            return False
        if not placeholder.has_add_permission(request):
            return False
        return True

    def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
        # Copying needs add permission on both placeholders plus per-type
        # "add" permission for every plugin being copied.
        if not source_placeholder.has_add_permission(request) or not target_placeholder.has_add_permission(
                request):
            return False
        for plugin in plugins:
            if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "add"):
                return False
        return True

    def has_change_plugin_permission(self, request, plugin):
        if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
            return False
        if not plugin.placeholder.has_change_permission(request):
            return False
        return True

    def has_move_plugin_permission(self, request, plugin, target_placeholder):
        if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
            return False
        if not target_placeholder.has_change_permission(request):
            return False
        return True

    def has_delete_plugin_permission(self, request, plugin):
        if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
            return False
        placeholder = plugin.placeholder
        if not placeholder.has_delete_permission(request):
            return False
        return True

    def has_clear_placeholder_permission(self, request, placeholder):
        if not placeholder.has_delete_permission(request):
            return False
        return True

    # The post_* methods below are no-op hooks that subclasses may
    # override to react to the corresponding plugin operation.
    def post_add_plugin(self, request, placeholder, plugin):
        pass

    def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
        pass

    def post_edit_plugin(self, request, plugin):
        pass

    def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
        pass

    def post_delete_plugin(self, request, plugin):
        pass

    def post_clear_placeholder(self, request, placeholder):
        pass

    def get_placeholder_template(self, request, placeholder):
        # Hook: return the template used to render this placeholder, if
        # any; used for per-template plugin limit checks.
        pass

    @method_decorator(require_POST)
    @xframe_options_sameorigin
    def add_plugin(self, request):
        """
        POST request should have the following data:
        - placeholder_id
        - plugin_type
        - plugin_language
        - plugin_parent (optional)
        """
        parent = None
        plugin_type = request.POST['plugin_type']
        placeholder_id = request.POST.get('placeholder_id', None)
        placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
        parent_id = request.POST.get('plugin_parent', None)
        language = request.POST.get('plugin_language') or get_language_from_request(request)
        if not self.has_add_plugin_permission(request, placeholder, plugin_type):
            return HttpResponseForbidden(force_text(_('You do not have permission to add a plugin')))
        try:
            has_reached_plugin_limit(placeholder, plugin_type, language,
                                     template=self.get_placeholder_template(request, placeholder))
        except PluginLimitReached as er:
            return HttpResponseBadRequest(er)
        # page add-plugin
        if not parent_id:
            position = request.POST.get('plugin_order',
                                        CMSPlugin.objects.filter(language=language, placeholder=placeholder).count())
        # in-plugin add-plugin
        else:
            parent = get_object_or_404(CMSPlugin, pk=parent_id)
            placeholder = parent.placeholder
            position = request.POST.get('plugin_order',
                                        CMSPlugin.objects.filter(language=language, parent=parent).count())
        # placeholder (non-page) add-plugin
        # Sanity check to make sure we're not getting bogus values from JavaScript:
        if settings.USE_I18N:
            if not language or not language in [lang[0] for lang in settings.LANGUAGES]:
                return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
            if parent and parent.language != language:
                return HttpResponseBadRequest(force_text(_("Parent plugin language must be same as language!")))
        else:
            language = settings.LANGUAGE_CODE
        plugin = CMSPlugin(language=language, plugin_type=plugin_type, position=position, placeholder=placeholder)
        if parent:
            # Child plugins are appended at the end of the parent's children.
            plugin.position = CMSPlugin.objects.filter(parent=parent).count()
            plugin.parent_id = parent.pk
        plugin.save()
        self.post_add_plugin(request, placeholder, plugin)
        # Return the edit/delete URLs so the frontend can open the edit form.
        response = {
            'url': force_text(
                admin_reverse("%s_%s_edit_plugin" % (self.model._meta.app_label, self.model._meta.model_name),
                              args=[plugin.pk])),
            'delete': force_text(
                admin_reverse("%s_%s_delete_plugin" % (self.model._meta.app_label, self.model._meta.model_name),
                              args=[plugin.pk])),
            'breadcrumb': plugin.get_breadcrumb(),
        }
        return HttpResponse(json.dumps(response), content_type='application/json')

    @method_decorator(require_POST)
    @xframe_options_sameorigin
    @transaction.atomic
    def copy_plugins(self, request):
        """
        POST request should have the following data:
        - source_language
        - source_placeholder_id
        - source_plugin_id (optional)
        - target_language
        - target_placeholder_id
        - target_plugin_id (optional, new parent)
        """
        source_language = request.POST['source_language']
        source_placeholder_id = request.POST['source_placeholder_id']
        source_plugin_id = request.POST.get('source_plugin_id', None)
        target_language = request.POST['target_language']
        target_placeholder_id = request.POST['target_placeholder_id']
        target_plugin_id = request.POST.get('target_plugin_id', None)
        source_placeholder = get_object_or_404(Placeholder, pk=source_placeholder_id)
        target_placeholder = get_object_or_404(Placeholder, pk=target_placeholder_id)
        if not target_language or not target_language in get_language_list():
            return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
        if source_plugin_id:
            source_plugin = get_object_or_404(CMSPlugin, pk=source_plugin_id)
            reload_required = requires_reload(PLUGIN_COPY_ACTION, [source_plugin])
            if source_plugin.plugin_type == "PlaceholderPlugin":
                # if it is a PlaceholderReference plugin only copy the plugins it references
                inst, cls = source_plugin.get_plugin_instance(self)
                plugins = inst.placeholder_ref.get_plugins_list()
            else:
                # Copy the plugin together with its whole subtree
                # (treebeard path prefix match).
                plugins = list(
                    source_placeholder.cmsplugin_set.filter(
                        path__startswith=source_plugin.path,
                        depth__gte=source_plugin.depth).order_by('path')
                )
        else:
            plugins = list(
                source_placeholder.cmsplugin_set.filter(language=source_language).order_by('path'))
            reload_required = requires_reload(PLUGIN_COPY_ACTION, plugins)
        if not self.has_copy_plugin_permission(request, source_placeholder, target_placeholder, plugins):
            return HttpResponseForbidden(force_text(_('You do not have permission to copy these plugins.')))
        if target_placeholder.pk == request.toolbar.clipboard.pk and not source_plugin_id and not target_plugin_id:
            # if we copy a whole placeholder to the clipboard create PlaceholderReference plugin instead and fill it
            # the content of the source_placeholder.
            ref = PlaceholderReference()
            ref.name = source_placeholder.get_label()
            ref.plugin_type = "PlaceholderPlugin"
            ref.language = target_language
            ref.placeholder = target_placeholder
            ref.save()
            ref.copy_from(source_placeholder, source_language)
        else:
            copy_plugins.copy_plugins_to(plugins, target_placeholder, target_language, target_plugin_id)
        plugin_list = CMSPlugin.objects.filter(language=target_language, placeholder=target_placeholder).order_by(
            'path')
        # Serialize the resulting plugin tree for the frontend.
        reduced_list = []
        for plugin in plugin_list:
            reduced_list.append(
                {
                    'id': plugin.pk, 'type': plugin.plugin_type, 'parent': plugin.parent_id,
                    'position': plugin.position, 'desc': force_text(plugin.get_short_description()),
                    'language': plugin.language, 'placeholder_id': plugin.placeholder_id
                }
            )
        self.post_copy_plugins(request, source_placeholder, target_placeholder, plugins)
        json_response = {'plugin_list': reduced_list, 'reload': reload_required}
        return HttpResponse(json.dumps(json_response), content_type='application/json')

    @xframe_options_sameorigin
    def edit_plugin(self, request, plugin_id):
        """Render and process the plugin's own admin add/change form,
        delegating to the plugin's ModelAdmin (GET renders, POST saves)."""
        try:
            plugin_id = int(plugin_id)
        except ValueError:
            return HttpResponseNotFound(force_text(_("Plugin not found")))
        cms_plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
        instance, plugin_admin = cms_plugin.get_plugin_instance(self.admin_site)
        if not self.has_change_plugin_permission(request, cms_plugin):
            return HttpResponseForbidden(force_text(_("You do not have permission to edit this plugin")))
        plugin_admin.cms_plugin_instance = cms_plugin
        try:
            plugin_admin.placeholder = cms_plugin.placeholder
        except Placeholder.DoesNotExist:
            pass
        if request.method == "POST":
            # set the continue flag, otherwise will plugin_admin make redirect to list
            # view, which actually doesn't exists
            request.POST['_continue'] = True
        if request.POST.get("_cancel", False):
            # cancel button was clicked
            context = {
                'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
                'plugin': cms_plugin,
                'is_popup': True,
                "type": cms_plugin.get_plugin_name(),
                'plugin_id': plugin_id,
                'icon': force_escape(escapejs(cms_plugin.get_instance_icon_src())),
                'alt': force_escape(escapejs(cms_plugin.get_instance_icon_alt())),
                'cancel': True,
            }
            instance = cms_plugin.get_plugin_instance()[0]
            if instance:
                context['name'] = force_text(instance)
            else:
                # cancelled before any content was added to plugin
                cms_plugin.delete()
                context.update({
                    "deleted": True,
                    'name': force_text(cms_plugin),
                })
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        if not instance:
            # instance doesn't exist, call add view
            response = plugin_admin.add_view(request)
        else:
            # already saved before, call change view
            # we actually have the instance here, but since i won't override
            # change_view method, is better if it will be loaded again, so
            # just pass id to plugin_admin
            response = plugin_admin.change_view(request, str(plugin_id))
        if request.method == "POST" and plugin_admin.object_successfully_changed:
            self.post_edit_plugin(request, plugin_admin.saved_object)
            saved_object = plugin_admin.saved_object
            context = {
                'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
                'plugin': saved_object,
                'is_popup': True,
                'name': force_text(saved_object),
                "type": saved_object.get_plugin_name(),
                'plugin_id': plugin_id,
                'icon': force_escape(saved_object.get_instance_icon_src()),
                'alt': force_escape(saved_object.get_instance_icon_alt()),
            }
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        return response

    @method_decorator(require_POST)
    @xframe_options_sameorigin
    def move_plugin(self, request):
        """
        POST request with following parameters:
        -plugin_id
        -placeholder_id
        -plugin_language (optional)
        -plugin_parent (optional)
        -plugin_order (array, optional)
        """
        plugin = CMSPlugin.objects.get(pk=int(request.POST['plugin_id']))
        placeholder = Placeholder.objects.get(pk=request.POST['placeholder_id'])
        parent_id = request.POST.get('plugin_parent', None)
        language = request.POST.get('plugin_language', None)
        source_placeholder = plugin.placeholder
        if not parent_id:
            parent_id = None
        else:
            parent_id = int(parent_id)
        if not language and plugin.language:
            language = plugin.language
        order = request.POST.getlist("plugin_order[]")
        if not self.has_move_plugin_permission(request, plugin, placeholder):
            return HttpResponseForbidden(force_text(_("You have no permission to move this plugin")))
        if not placeholder == source_placeholder:
            # Moving across placeholders: re-check the per-template limits.
            try:
                template = self.get_placeholder_template(request, placeholder)
                has_reached_plugin_limit(placeholder, plugin.plugin_type, plugin.language, template=template)
            except PluginLimitReached as er:
                return HttpResponseBadRequest(er)
        if parent_id:
            if plugin.parent_id != parent_id:
                parent = CMSPlugin.objects.get(pk=parent_id)
                if parent.placeholder_id != placeholder.pk:
                    return HttpResponseBadRequest(force_text('parent must be in the same placeholder'))
                if parent.language != language:
                    return HttpResponseBadRequest(force_text('parent must be in the same language as plugin_language'))
                plugin.parent_id = parent.pk
                plugin.save()
                plugin = plugin.move(parent, pos='last-child')
        else:
            # No parent: re-root the plugin as the last tree root.
            sibling = CMSPlugin.get_last_root_node()
            plugin.parent_id = None
            plugin.save()
            plugin = plugin.move(sibling, pos='right')
        # Re-home the whole subtree in the target placeholder/language.
        for child in [plugin] + list(plugin.get_descendants()):
            child.placeholder = placeholder
            child.language = language
            child.save()
        plugins = reorder_plugins(placeholder, parent_id, language, order)
        if not plugins:
            return HttpResponseBadRequest('order parameter did not have all plugins of the same level in it')
        self.post_move_plugin(request, source_placeholder, placeholder, plugin)
        json_response = {'reload': requires_reload(PLUGIN_MOVE_ACTION, [plugin])}
        return HttpResponse(json.dumps(json_response), content_type='application/json')

    @xframe_options_sameorigin
    def delete_plugin(self, request, plugin_id):
        """GET renders a delete-confirmation page; POST performs the delete."""
        plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
        if not self.has_delete_plugin_permission(request, plugin):
            return HttpResponseForbidden(force_text(_("You do not have permission to delete this plugin")))
        plugin_cms_class = plugin.get_plugin_class()
        plugin_class = plugin_cms_class.model
        opts = plugin_class._meta
        using = router.db_for_write(plugin_class)
        app_label = opts.app_label
        # get_deleted_objects changed its return signature in Django 1.8.
        if DJANGO_1_7:
            deleted_objects, perms_needed, protected = get_deleted_objects(
                [plugin], opts, request.user, self.admin_site, using)
        else:
            deleted_objects, __, perms_needed, protected = get_deleted_objects(
                [plugin], opts, request.user, self.admin_site, using)
        if request.POST:  # The user has already confirmed the deletion.
            if perms_needed:
                raise PermissionDenied(_("You do not have permission to delete this plugin"))
            obj_display = force_text(plugin)
            self.log_deletion(request, plugin, obj_display)
            plugin.delete()
            self.message_user(request, _('The %(name)s plugin "%(obj)s" was deleted successfully.') % {
                'name': force_text(opts.verbose_name), 'obj': force_text(obj_display)})
            self.post_delete_plugin(request, plugin)
            return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
        plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": plugin_name}
        else:
            title = _("Are you sure?")
        context = {
            "title": title,
            "object_name": plugin_name,
            "object": plugin,
            "deleted_objects": deleted_objects,
            "perms_lacking": perms_needed,
            "protected": protected,
            "opts": opts,
            "app_label": app_label,
        }
        return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
                                current_app=self.admin_site.name)

    @xframe_options_sameorigin
    def clear_placeholder(self, request, placeholder_id):
        """GET renders a confirmation page; POST deletes all plugins in the
        placeholder (optionally restricted to ?language=)."""
        placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
        if not self.has_clear_placeholder_permission(request, placeholder):
            return HttpResponseForbidden(force_text(_("You do not have permission to clear this placeholder")))
        language = request.GET.get('language', None)
        plugins = placeholder.get_plugins(language)
        opts = Placeholder._meta
        using = router.db_for_write(Placeholder)
        app_label = opts.app_label
        # get_deleted_objects changed its return signature in Django 1.8.
        if DJANGO_1_7:
            deleted_objects, perms_needed, protected = get_deleted_objects(
                plugins, opts, request.user, self.admin_site, using)
        else:
            deleted_objects, __, perms_needed, protected = get_deleted_objects(
                plugins, opts, request.user, self.admin_site, using)
        obj_display = force_text(placeholder)
        if request.POST:  # The user has already confirmed the deletion.
            if perms_needed:
                return HttpResponseForbidden(force_text(_("You do not have permission to clear this placeholder")))
            self.log_deletion(request, placeholder, obj_display)
            placeholder.clear(language)
            self.message_user(request, _('The placeholder "%(obj)s" was cleared successfully.') % {
                'obj': force_text(obj_display)})
            self.post_clear_placeholder(request, placeholder)
            return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": obj_display}
        else:
            title = _("Are you sure?")
        context = {
            "title": title,
            "object_name": _("placeholder"),
            "object": placeholder,
            "deleted_objects": deleted_objects,
            "perms_lacking": perms_needed,
            "protected": protected,
            "opts": opts,
            "app_label": app_label,
        }
        return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
                                current_app=self.admin_site.name)
| bsd-3-clause |
aleksandr-bakanov/astropy | astropy/io/fits/util.py | 3 | 29605 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import io
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
from astropy.utils import data
from distutils.version import LooseVersion
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
try:
    # Support the Python 3.6 PathLike ABC where possible
    from os import PathLike
    # Types accepted anywhere a filesystem path string is expected.
    path_like = (str, PathLike)
except ImportError:
    path_like = (str,)
def cmp(a, b):
    """Python 2 style three-way comparison.

    Returns -1 if ``a < b``, 0 if equal, and 1 if ``a > b``.  Replaces the
    builtin removed in Python 3; defined with ``def`` rather than a lambda
    assignment (PEP 8 E731) for a proper name in tracebacks.
    """
    return (a > b) - (a < b)


# Every type treated as a plain integer, including numpy integer scalars.
all_integer_types = (int, np.integer)
class NotifierMixin:
    """
    Mixin that lets objects register listeners and broadcast state changes
    to them.

    All methods provided by this class are underscored, since this is
    intended for internal use to communicate between classes in a generic
    way and is not machinery that should be exposed to users of the classes
    involved.

    Listeners registered with ``_add_listener`` are held through weak
    references, so a listener that is otherwise unreferenced is dropped
    from the registry automatically and never needs manual removal.
    Calling ``_notify('change_type', *args, **kwargs)`` invokes
    ``listener._update_change_type(*args, **kwargs)`` on every registered
    listener that defines such a method; listeners lacking the method are
    silently skipped.
    """

    # Lazily replaced with a WeakValueDictionary on the first
    # _add_listener() call.
    _listeners = None

    def _add_listener(self, listener):
        """
        Register *listener* for notifications from this object.

        Only a weak reference is stored, so the listener is removed from
        the registry once no other references to it remain.
        """
        if self._listeners is None:
            self._listeners = weakref.WeakValueDictionary()

        self._listeners[id(listener)] = listener

    def _remove_listener(self, listener):
        """
        Unregister *listener*, matched by object identity; a listener that
        was never registered is ignored.
        """
        if self._listeners is None:
            return

        with suppress(KeyError):
            del self._listeners[id(listener)]

    def _notify(self, notification, *args, **kwargs):
        """
        Call ``_update_<notification>(*args, **kwargs)`` on every live
        listener that defines it.

        The changed object itself is not passed automatically; include it
        in ``*args`` if the listeners need it.
        """
        if self._listeners is None:
            return

        method_name = f'_update_{notification}'
        # Use valuerefs instead of itervaluerefs; see
        # https://github.com/astropy/astropy/issues/4015
        for ref in self._listeners.valuerefs():
            target = ref()  # dereference the weakref
            if target is None:
                continue

            handler = getattr(target, method_name, None)
            if callable(handler):
                handler(*args, **kwargs)

    def __getstate__(self):
        """
        Drop the (possibly ephemeral) listener registry from pickled state.
        """
        # TODO: This hasn't come up often, but if anyone needs to pickle HDU
        # objects it will be necessary when HDU objects' states are restored
        # to re-register themselves as listeners on their new column
        # instances.
        try:
            state = super().__getstate__()
        except AttributeError:
            # The super object has no __getstate__; snapshot the instance
            # dict instead.
            state = self.__dict__.copy()

        state['_listeners'] = None
        return state
def first(iterable):
    """
    Return the first item produced by iterating over *iterable*.

    Raises StopIteration when the iterable is empty, exactly like
    ``next(iter(iterable))``.
    """
    for item in iterable:
        return item
    raise StopIteration
def itersubclasses(cls, _seen=None):
    """
    Generate every subclass of *cls*, depth first, visiting siblings in
    alphabetical order of class name.  Each class is yielded at most once,
    even under multiple inheritance.

    From http://code.activestate.com/recipes/576949/
    """
    if _seen is None:
        _seen = set()

    try:
        subclasses = cls.__subclasses__()
    except TypeError:
        # Only `type` itself needs the explicit argument form
        subclasses = cls.__subclasses__(cls)

    for subclass in sorted(subclasses, key=operator.attrgetter('__name__')):
        if subclass in _seen:
            continue
        _seen.add(subclass)
        yield subclass
        yield from itersubclasses(subclass, _seen)
def ignore_sigint(func):
    """
    Decorator that defers SIGINT while the wrapped function runs.

    A custom handler is installed for the duration of the call; if a
    SIGINT arrives, a warning is issued and KeyboardInterrupt is raised
    only after the wrapped function has completed.  The handler is only
    installed when running single-threaded in the main thread, because
    ``signal.signal`` may only be called from the main thread.

    Returns the wrapped function's return value (previously discarded).
    """
    @wraps(func)
    def wrapped(*args, **kwargs):
        # Modernized: threading.currentThread/activeCount/getName were
        # deprecated in 3.10 and removed in Python 3.12.
        curr_thread = threading.current_thread()
        single_thread = (threading.active_count() == 1 and
                         curr_thread.name == 'MainThread')

        class SigintHandler:
            def __init__(self):
                self.sigint_received = False

            def __call__(self, signum, frame):
                warnings.warn('KeyboardInterrupt ignored until {} is '
                              'complete!'.format(func.__name__),
                              AstropyUserWarning)
                self.sigint_received = True

        sigint_handler = SigintHandler()

        if single_thread:
            # Install the deferring handler, remembering the previous one
            old_handler = signal.signal(signal.SIGINT, sigint_handler)

        try:
            return func(*args, **kwargs)
        finally:
            if single_thread:
                # Restore the previous handler (or the default if none)
                if old_handler is not None:
                    signal.signal(signal.SIGINT, old_handler)
                else:
                    signal.signal(signal.SIGINT, signal.SIG_DFL)

                # Re-raise the interrupt we swallowed during the call
                if sigint_handler.sigint_received:
                    raise KeyboardInterrupt

    return wrapped
def pairwise(iterable):
    """Return successive overlapping pairs from *iterable*.

    Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
    """
    first_it, second_it = itertools.tee(iterable)
    # Advance the second iterator by one element; the default argument
    # avoids StopIteration when the iterable is empty.
    next(second_it, None)
    return zip(first_it, second_it)
def encode_ascii(s):
    """
    Encode a unicode string, or a unicode numpy array, to ASCII bytes.

    Plain `str` values are encoded to `bytes`; unicode (``U``) numpy
    arrays are converted to byte-string (``S``) arrays of matching width.
    Bytes and byte-string arrays pass through unchanged.

    Raises
    ------
    TypeError
        If *s* is a numpy array that is neither a unicode nor a
        byte-string array.
    """
    if isinstance(s, str):
        return s.encode('ascii')
    elif (isinstance(s, np.ndarray) and
          issubclass(s.dtype.type, np.str_)):
        # view() preserves the array subclass that np.char.encode drops
        ns = np.char.encode(s, 'ascii').view(type(s))
        # A U dtype uses 4 bytes per character, an S dtype 1 byte per
        # character.  Use integer division so the fallback astype receives
        # an int itemsize (a float itemsize is invalid for numpy dtype
        # construction in modern numpy).
        if ns.dtype.itemsize != s.dtype.itemsize // 4:
            ns = ns.astype((np.bytes_, s.dtype.itemsize // 4))
        return ns
    elif (isinstance(s, np.ndarray) and
          not issubclass(s.dtype.type, np.bytes_)):
        raise TypeError('string operation on non-string array')
    return s
def decode_ascii(s):
    """
    Decode bytes, or a byte-string numpy array, to unicode.

    Bytes are decoded as ASCII; undecodable bytes are replaced with ``?``
    and a warning is issued.  Byte-string (``S``) numpy arrays are decoded
    to unicode (``U``) arrays of matching character width.  Plain strings
    and unicode arrays pass through unchanged.

    Raises
    ------
    TypeError
        If *s* is a numpy array that is neither a byte-string nor a
        unicode array.
    """
    if isinstance(s, bytes):
        try:
            return s.decode('ascii')
        except UnicodeDecodeError:
            warnings.warn('non-ASCII characters are present in the FITS '
                          'file header and have been replaced by "?" '
                          'characters', AstropyUserWarning)
            # errors='replace' yields U+FFFD replacement characters, which
            # are then swapped for plain '?'
            s = s.decode('ascii', errors='replace')
            return s.replace('\ufffd', '?')
    elif (isinstance(s, np.ndarray) and
          issubclass(s.dtype.type, np.bytes_)):
        # np.char.encode/decode annoyingly don't preserve the type of the
        # array, hence the view() call
        # It also doesn't necessarily preserve widths of the strings,
        # hence the astype()
        if s.size == 0:
            # Numpy apparently also has a bug that if a string array is
            # empty, calling np.char.decode on it returns an empty float64
            # array instead, so build the empty unicode array directly.
            dt = s.dtype.str.replace('S', 'U')
            ns = np.array([], dtype=dt).view(type(s))
        else:
            ns = np.char.decode(s, 'ascii').view(type(s))
        # A U dtype uses 4 bytes per character vs 1 for S; widen if the
        # decoded array lost characters.
        if ns.dtype.itemsize / 4 != s.dtype.itemsize:
            ns = ns.astype((np.str_, s.dtype.itemsize))
        return ns
    elif (isinstance(s, np.ndarray) and
          not issubclass(s.dtype.type, np.str_)):
        # Don't silently pass through on non-string arrays; we don't want
        # to hide errors where things that are not stringy are attempting
        # to be decoded
        raise TypeError('string operation on non-string array')
    return s
def isreadable(f):
    """
    Return True if the file-like object can be read from.  This is a
    common-sense approximation of io.IOBase.readable.
    """
    if hasattr(f, 'readable'):
        return f.readable()

    if hasattr(f, 'closed') and f.closed:
        # Mimic io.IOBase.readable's behavior on a closed file
        raise ValueError('I/O operation on closed file')

    # Not closed: require a read() method and, when a mode string is
    # advertised, one that permits reading.
    has_read = hasattr(f, 'read')
    mode_allows = not hasattr(f, 'mode') or any(c in f.mode for c in 'r+')
    return has_read and mode_allows
def iswritable(f):
    """
    Return True if the file-like object can be written to.  This is a
    common-sense approximation of io.IOBase.writable.
    """
    if hasattr(f, 'writable'):
        return f.writable()

    if hasattr(f, 'closed') and f.closed:
        # Mimic io.IOBase.writable's behavior on a closed file
        raise ValueError('I/O operation on closed file')

    # Not closed: require a write() method and, when a mode string is
    # advertised, one that permits writing.
    has_write = hasattr(f, 'write')
    mode_allows = not hasattr(f, 'mode') or any(c in f.mode for c in 'wa+')
    return has_write and mode_allows
def isfile(f):
    """
    Return True if *f* represents an OS-level file (an `io.FileIO`), or a
    higher-level wrapper (buffered or text) around one, such as a
    TextIOWrapper.
    """
    if isinstance(f, io.FileIO):
        return True

    # Unwrap text/buffered layers: TextIOWrapper exposes .buffer,
    # BufferedReader/Writer expose .raw.
    for attr in ('buffer', 'raw'):
        if hasattr(f, attr):
            return isfile(getattr(f, attr))

    return False
def fileobj_open(filename, mode):
    """
    Open *filename* with buffering disabled.

    `open()` normally returns an `io.BufferedReader`, which does not
    support the random access patterns we need in some cases; passing
    ``buffering=0`` yields a raw random-access file object instead.
    """
    raw = open(filename, mode, buffering=0)
    return raw
def fileobj_name(f):
    """
    Return the best available 'name' for the file-like object *f*: the
    string itself for paths, the underlying file's name for gzip objects,
    a ``name``/``filename`` attribute when present, or otherwise a string
    describing the object's type.
    """
    if isinstance(f, str):
        return f

    if isinstance(f, gzip.GzipFile):
        # GzipFile.name does not always represent the file being
        # read/written--it can also be the original name of the file being
        # compressed.  See
        # https://docs.python.org/3/library/gzip.html#gzip.GzipFile
        # so for gzip files report the underlying fileobj's name instead.
        return fileobj_name(f.fileobj)

    for attr in ('name', 'filename'):
        if hasattr(f, attr):
            return getattr(f, attr)

    if hasattr(f, '__class__'):
        return str(f.__class__)
    return str(type(f))
def fileobj_closed(f):
    """
    Return True if *f* is closed, or if it is a string (assumed to be a
    pathname).  Objects exposing no notion of a 'closed' state are
    reported as open.
    """
    if isinstance(f, str):
        return True

    if hasattr(f, 'closed'):
        return f.closed

    # Check wrapped file objects (gzip-style .fileobj, PIL-style .fp)
    for attr in ('fileobj', 'fp'):
        inner = getattr(f, attr, None)
        if hasattr(inner, 'closed'):
            return inner.closed

    return False
def fileobj_mode(f):
    """
    Return the normalized 'mode' string of a file-like object if such a
    thing exists; otherwise return None.
    """
    # Check the most specific wrappers first--gzip objects, for example,
    # expose a 'mode' attribute that is not analogous to file.mode.
    if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):
        # gzip.GzipFile-like: use the wrapped file's mode
        inner = f.fileobj
    elif hasattr(f, 'fileobj_mode'):
        # astropy.io.fits._File-like; already validated, return directly
        return f.fileobj_mode
    elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):
        # PIL-Image-like: inspect the fp (file buffer)
        inner = f.fp
    elif hasattr(f, 'mode'):
        # FILEIO-like (a normal open(...) result)
        inner = f
    else:
        # Not a file-like object: strings, urls, paths, ...
        return None

    return _fileobj_normalize_mode(inner)
def _fileobj_normalize_mode(f):
    """Normalize oddly formatted mode strings (e.g. ``'r+b'`` -> ``'rb+'``)
    and translate gzip's numeric modes into ``'rb'``/``'wb'``.
    """
    mode = f.mode

    if isinstance(f, gzip.GzipFile):
        # GzipFile modes are integer constants: readonly or writeonly.
        # Anything else maps to None (shouldn't happen).
        return {gzip.READ: 'rb', gzip.WRITE: 'wb'}.get(mode)

    # Python can produce modes like 'r+b'; normalize to 'rb+'
    if '+' in mode:
        mode = mode.replace('+', '') + '+'

    return mode
def fileobj_is_binary(f):
    """
    Return True if the given file or file-like object is open in binary
    mode.  When in doubt, default to True.
    """
    # Hack for _File objects, which advertise binary-ness directly and,
    # for the time being, are *always* binary
    if hasattr(f, 'binary'):
        return f.binary

    if isinstance(f, io.TextIOBase):
        return False

    mode = fileobj_mode(f)
    return 'b' in mode if mode else True
def translate(s, table, deletechars):
    """Apply *table* to *s* as `str.translate` does, after arranging for
    every character in *deletechars* to be deleted."""
    if deletechars:
        # Work on a copy so the caller's table is not mutated; mapping a
        # code point to None deletes that character.
        table = dict(table)
        for ch in deletechars:
            table[ord(ch)] = None
    return s.translate(table)
def fill(text, width, **kwargs):
    """
    Like :func:`textwrap.wrap` but preserves existing paragraphs which
    :func:`textwrap.wrap` does not otherwise handle well.  Also handles
    section headers.
    """
    def _wrap_paragraph(paragraph):
        # A paragraph whose every line already fits is left untouched;
        # this preserves manually formatted sections and headers.
        if all(len(line) < width for line in paragraph.splitlines()):
            return paragraph
        return textwrap.fill(paragraph, width, **kwargs)

    # Paragraphs are separated by blank lines
    return '\n\n'.join(_wrap_paragraph(p) for p in text.split('\n\n'))
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
# Tri-state flag: None = not yet determined; set to True/False the first
# time _array_from_file() checks the platform.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
    """Create a numpy array from a file or a file-like object.

    Parameters
    ----------
    infile : file or file-like
        Source of the raw bytes; real OS-level files are read with
        `np.fromfile`, anything else through its ``read`` method.
    dtype : numpy dtype
        Element type of the resulting array.
    count : int
        Number of elements to read.
    """
    if isfile(infile):
        global CHUNKED_FROMFILE
        if CHUNKED_FROMFILE is None:
            # One-time platform check: MacOS X <= 10.8 cannot fromfile()
            # more than 2Gb at once (see the comment at CHUNKED_FROMFILE).
            if (sys.platform == 'darwin' and
                    LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):
                CHUNKED_FROMFILE = True
            else:
                CHUNKED_FROMFILE = False
        if CHUNKED_FROMFILE:
            chunk_size = int(1024 ** 3 / dtype.itemsize)  # 1Gb to be safe
            if count < chunk_size:
                return np.fromfile(infile, dtype=dtype, count=count)
            else:
                # Fill the output array one <=1Gb slice at a time
                array = np.empty(count, dtype=dtype)
                for beg in range(0, count, chunk_size):
                    end = min(count, beg + chunk_size)
                    array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
                return array
        else:
            return np.fromfile(infile, dtype=dtype, count=count)
    else:
        # treat as file-like object with "read" method; this includes gzip file
        # objects, because numpy.fromfile just reads the compressed bytes from
        # their underlying file object, instead of the decompressed bytes
        read_size = np.dtype(dtype).itemsize * count
        s = infile.read(read_size)
        array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
        # copy is needed because the ndarray constructed over the bytes
        # object is a read-only view of that buffer
        array = array.copy()
        return array
# Largest byte count fwrite handles reliably per call on 64-bit OSX
# (see the workaround comments in _array_to_file): 2**32 - 1.
_OSX_WRITE_LIMIT = (2 ** 32) - 1
# Windows' fwrite has a similar limit at 2**31 - 1 bytes.
_WIN_WRITE_LIMIT = (2 ** 31) - 1
def _array_to_file(arr, outfile):
    """
    Write a numpy array to a file or a file-like object.

    Parameters
    ----------
    arr : `~numpy.ndarray`
        The Numpy array to write.
    outfile : file-like
        A file-like object such as a Python file object, an `io.BytesIO`, or
        anything else with a ``write`` method.  The file object must support
        the buffer interface in its ``write``.

    If writing directly to an on-disk file this delegates directly to
    `ndarray.tofile`.  Otherwise a slower Python implementation is used.
    """
    if isfile(outfile) and not isinstance(outfile, io.BufferedIOBase):
        write = lambda a, f: a.tofile(f)
    else:
        write = _array_to_file_like

    # Implements a workaround for a bug deep in OSX's stdlib file writing
    # functions; on 64-bit OSX it is not possible to correctly write a number
    # of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
    # whatever the default blocksize for the filesystem is).
    # This issue should have a workaround in Numpy too, but hasn't been
    # implemented there yet: https://github.com/astropy/astropy/issues/839
    #
    # Apparently Windows has its own fwrite bug:
    # https://github.com/numpy/numpy/issues/2256
    if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and
            arr.nbytes % 4096 == 0):
        # chunksize is a count of elements in the array, not bytes
        chunksize = _OSX_WRITE_LIMIT // arr.itemsize
    elif sys.platform.startswith('win'):
        chunksize = _WIN_WRITE_LIMIT // arr.itemsize
    else:
        # Just pass the whole array to the write routine
        return write(arr, outfile)

    # Write one chunk at a time for systems whose fwrite chokes on large
    # writes.  BUG FIX: the loop previously compared the element index
    # against ``arr.nbytes`` (a byte count), causing many useless empty
    # writes once the real data was exhausted; compare element counts.
    idx = 0
    arr = arr.view(np.ndarray).flatten()
    while idx < arr.size:
        write(arr[idx:idx + chunksize], outfile)
        idx += chunksize
def _array_to_file_like(arr, fileobj):
    """
    Write a `~numpy.ndarray` to a file-like object (which is not supported by
    `numpy.ndarray.tofile`).

    Contiguous arrays are written in one shot via the buffer interface;
    non-contiguous arrays (or file objects without buffer support) fall
    back to writing element by element in C order.
    """
    # If the array is empty, we can simply take a shortcut and return since
    # there is nothing to write.
    if len(arr) == 0:
        return

    if arr.flags.contiguous:
        # It suffices to just pass the underlying buffer directly to the
        # fileobj's write (assuming it supports the buffer interface).  If
        # it does not have the buffer interface, a TypeError should be
        # raised, in which case we fall back to the element-wise path.
        try:
            fileobj.write(arr.data)
        except TypeError:
            pass
        else:
            return

    # nditer iterates in the requested (C) order regardless of the array's
    # memory layout, which preserves the logical element order for
    # non-contiguous arrays.  Note: tostring() was removed in NumPy 2.0;
    # tobytes() is the identical replacement.  (The old fallback for numpy
    # builds without nditer was dead code and has been dropped.)
    for item in np.nditer(arr, order='C'):
        fileobj.write(item.tobytes())
def _write_string(f, s):
    """
    Write a string to a file, encoding to ASCII if the file is open in
    binary mode, or decoding if the file is open in text mode.
    """
    # Assume if the file object doesn't have a specific mode, that the mode
    # is binary
    binmode = fileobj_is_binary(f)

    if binmode and isinstance(s, str):
        s = encode_ascii(s)
    elif not binmode and not isinstance(s, str):
        # BUG FIX: this previously tested ``isinstance(f, str)`` -- the
        # *file* rather than the data -- which only worked by accident
        # because decode_ascii() passes str values through unchanged.
        s = decode_ascii(s)

    f.write(s)
def _convert_array(array, dtype):
    """
    Convert *array* to *dtype*.

    A view is returned when the itemsizes match and the two types are not
    both numeric; otherwise a new array is created with ``astype``.
    """
    if array.dtype == dtype:
        return array

    same_size = array.dtype.itemsize == dtype.itemsize
    both_numeric = (np.issubdtype(array.dtype, np.number) and
                    np.issubdtype(dtype, np.number))

    if same_size and not both_numeric:
        # Excluding numeric<->numeric pairs from the view shortcut accounts
        # for old Trac ticket 218 (now inaccessible).
        return array.view(dtype)
    return array.astype(dtype)
def _unsigned_zero(dtype):
    """
    Given an unsigned numpy dtype, return its "zero" point: the value
    exactly in the middle of its representable range (e.g. 32768 for
    uint16).
    """
    assert dtype.kind == 'u'
    # Half of 2**bits, i.e. 2**(bits - 1)
    return 2 ** (dtype.itemsize * 8 - 1)
def _is_pseudo_unsigned(dtype):
    # "Pseudo-unsigned" means an unsigned integer dtype of at least two
    # bytes (uint16 and wider).
    if dtype.kind != 'u':
        return False
    return dtype.itemsize >= 2
def _is_int(val):
    # True for any Python or numpy integer scalar (see all_integer_types).
    return isinstance(val, all_integer_types)
def _str_to_num(val):
    """Convert *val* to an `int`, falling back to `float` for
    non-integral input.  Lets `float()` raise for non-numeric input."""
    try:
        return int(val)
    except ValueError:
        # Not integral; if this isn't numeric either, the ValueError from
        # float() is the error the caller should see anyway.
        return float(val)
def _words_group(input, strlen):
    """
    Split a long string into parts where each part is no longer
    than ``strlen`` and no word is cut into two pieces. But if
    there is one single word which is longer than ``strlen``, then
    it will be split in the middle of the word.
    """
    words = []
    nblanks = input.count(' ')
    # Heuristic upper bound on the number of pieces: at most one per blank,
    # or enough strlen-wide pieces to cover the whole string.
    nmax = max(nblanks, len(input) // strlen + 1)
    arr = np.frombuffer((input + ' ').encode('utf8'), dtype='S1')
    # locations of the blanks
    blank_loc = np.nonzero(arr == b' ')[0]
    offset = 0
    xoffset = 0
    for idx in range(nmax):
        try:
            # Find the first blank at or past strlen characters beyond the
            # previous cut, then cut just after the blank preceding it.
            loc = np.nonzero(blank_loc >= strlen + offset)[0][0]
            offset = blank_loc[loc - 1] + 1
            if loc == 0:
                offset = -1
        except Exception:
            # No qualifying blank left: the remainder is the final piece.
            offset = len(input)
        # check for one word longer than strlen, break in the middle
        if offset <= xoffset:
            offset = xoffset + strlen
        # collect the pieces in a list
        words.append(input[xoffset:offset])
        if len(input) == offset:
            break
        xoffset = offset
    return words
def _tmp_name(input):
    """
    Create a temporary file name which should not already exist.  Use the
    directory of the input file as the base name of the mkstemp() output.
    """
    dirname = None if input is None else os.path.dirname(input)
    handle, name = tempfile.mkstemp(dir=dirname)
    # mkstemp opens the file; only the unique name is needed here, so
    # release the descriptor immediately.
    os.close(handle)
    return name
def _get_array_mmap(array):
"""
If the array has an mmap.mmap at base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, 'base') and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
    """
    Context manager that, when an `OSError` escapes the wrapped block,
    closes every HDU and re-raises the error -- prefixed with a "not
    enough space on disk" message if the target directory is indeed too
    small to hold the HDUs.

    Parameters
    ----------
    hdulist : HDUList or single HDU
        The HDU(s) being written; each is closed on failure.
    dirname : str, optional
        Directory being written to; defaults to the directory of the
        HDUList's backing file.
    """
    try:
        yield
    except OSError as exc:
        error_message = ''
        if dirname is None:
            # BUG FIX: look up the backing file *before* wrapping a bare
            # HDU in a plain list below -- a plain list has no ``_file``
            # attribute, so the old order raised AttributeError here.
            dirname = os.path.dirname(hdulist._file.name)
        if not isinstance(hdulist, list):
            hdulist = [hdulist, ]
        if os.path.isdir(dirname):
            free_space = data.get_free_space_in_dir(dirname)
            hdulist_size = sum(hdu.size for hdu in hdulist)
            if free_space < hdulist_size:
                error_message = ("Not enough space on disk: requested {}, "
                                 "available {}. ".format(hdulist_size, free_space))

        for hdu in hdulist:
            hdu._close()

        raise OSError(error_message + str(exc))
def _extract_number(value, default):
    """
    Attempt to coerce *value* to an `int`, returning *default* when the
    conversion is impossible.
    """
    try:
        # _str_to_num accepts int- or float-formatted strings; the extra
        # int() truncates any float result.
        num = _str_to_num(value)
        return int(num)
    except (TypeError, ValueError):
        return default
def get_testdata_filepath(filename):
    """
    Return a string representing the path to the file requested from the
    io.fits test data set.

    .. versionadded:: 2.0.3

    Parameters
    ----------
    filename : str
        The filename of the test data file.

    Returns
    -------
    filepath : str
        The path to the requested file.
    """
    # BUG FIX: the requested filename must be interpolated into the data
    # path; previously a literal placeholder was baked into the string and
    # the parameter was ignored.
    return data.get_pkg_data_filename(
        f'io/fits/tests/data/{filename}', 'astropy')
def _rstrip_inplace(array):
    """
    Performs an in-place rstrip operation on string arrays. This is necessary
    since the built-in `np.char.rstrip` in Numpy does not perform an in-place
    calculation.

    Parameters
    ----------
    array : `~numpy.ndarray`
        Array of dtype kind ``'S'`` (bytes) or ``'U'`` (unicode); trailing
        spaces are overwritten with NULs in place.

    Returns
    -------
    array : `~numpy.ndarray`
        The same array object, for call-chaining convenience.

    Raises
    ------
    TypeError
        If *array* is not a string array.
    """
    # The following implementation convert the string to unsigned integers of
    # the right length. Trailing spaces (which are represented as 32) are then
    # converted to null characters (represented as zeros). To avoid creating
    # large temporary mask arrays, we loop over chunks (attempting to do that
    # on a 1-D version of the array; large memory may still be needed in the
    # unlikely case that a string array has small first dimension and cannot
    # be represented as a contiguous 1-D array in memory).
    dt = array.dtype
    if dt.kind not in 'SU':
        raise TypeError("This function can only be used on string arrays")
    # View the array as appropriate integers. The last dimension will
    # equal the number of characters in each string.
    bpc = 1 if dt.kind == 'S' else 4
    dt_int = "({},){}u{}".format(dt.itemsize // bpc, dt.byteorder, bpc)
    b = array.view(dt_int, np.ndarray)
    # For optimal speed, work in chunks of the internal ufunc buffer size.
    bufsize = np.getbufsize()
    # Attempt to have the strings as a 1-D array to give the chunk known size.
    # Note: the code will work if this fails; the chunks will just be larger.
    if b.ndim > 2:
        try:
            b.shape = -1, b.shape[-1]
        except AttributeError:  # can occur for non-contiguous arrays
            pass
    for j in range(0, b.shape[0], bufsize):
        c = b[j:j + bufsize]
        # Mask which will tell whether we're in a sequence of trailing spaces.
        mask = np.ones(c.shape[:-1], dtype=bool)
        # Loop over the characters in the strings, in reverse order. We process
        # the i-th character of all strings in the chunk at the same time. If
        # the character is 32, this corresponds to a space, and we then change
        # this to 0. We then construct a new mask to find rows where the
        # i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
        for i in range(-1, -c.shape[-1], -1):
            mask &= c[..., i] == 32
            c[..., i][mask] = 0
            mask = c[..., i] == 0
    return array
| bsd-3-clause |
fhaoquan/kbengine | kbe/res/scripts/common/Lib/test/audiotests.py | 72 | 12345 | from test.support import findfile, TESTFN, unlink
import unittest
import array
import io
import pickle
import sys
class UnseekableIO(io.FileIO):
    """A FileIO whose seek()/tell() always raise, used to exercise the audio
    modules' streaming (non-seekable file) code paths."""

    def tell(self):
        raise io.UnsupportedOperation

    def seek(self, *args, **kwargs):
        raise io.UnsupportedOperation
class AudioTests:
    """Base fixture for tests of wave/aifc/sunau-style audio modules.

    Subclasses provide ``self.module`` (the module under test) and the
    expected format attributes (nchannels, sampwidth, framerate, ...).
    """
    # True if the module under test closes a caller-supplied file object.
    close_fd = False

    def setUp(self):
        self.f = self.fout = None

    def tearDown(self):
        # Close any reader/writer left open by a test, then remove TESTFN.
        if self.f is not None:
            self.f.close()
        if self.fout is not None:
            self.fout.close()
        unlink(TESTFN)

    def check_params(self, f, nchannels, sampwidth, framerate, nframes,
                     comptype, compname):
        """Assert the reader's individual getters, the getparams()
        namedtuple fields, and a pickle round-trip of the params all match
        the expected values."""
        self.assertEqual(f.getnchannels(), nchannels)
        self.assertEqual(f.getsampwidth(), sampwidth)
        self.assertEqual(f.getframerate(), framerate)
        self.assertEqual(f.getnframes(), nframes)
        self.assertEqual(f.getcomptype(), comptype)
        self.assertEqual(f.getcompname(), compname)
        params = f.getparams()
        self.assertEqual(params,
                         (nchannels, sampwidth, framerate, nframes, comptype, compname))
        self.assertEqual(params.nchannels, nchannels)
        self.assertEqual(params.sampwidth, sampwidth)
        self.assertEqual(params.framerate, framerate)
        self.assertEqual(params.nframes, nframes)
        self.assertEqual(params.comptype, comptype)
        self.assertEqual(params.compname, compname)
        dump = pickle.dumps(params)
        self.assertEqual(pickle.loads(dump), params)
class AudioWriteTests(AudioTests):
    """Round-trip write tests: write frames with the module under test,
    then re-open the result and verify parameters and payload.

    Exercises normal, appended-after-junk, and unseekable-file output.
    """

    def create_file(self, testfile):
        """Open *testfile* for writing with the subclass's parameters set."""
        f = self.fout = self.module.open(testfile, 'wb')
        f.setnchannels(self.nchannels)
        f.setsampwidth(self.sampwidth)
        f.setframerate(self.framerate)
        f.setcomptype(self.comptype, self.compname)
        return f

    def check_file(self, testfile, nframes, frames):
        """Re-open *testfile* and verify its parameters and frame data."""
        with self.module.open(testfile, 'rb') as f:
            self.assertEqual(f.getnchannels(), self.nchannels)
            self.assertEqual(f.getsampwidth(), self.sampwidth)
            self.assertEqual(f.getframerate(), self.framerate)
            self.assertEqual(f.getnframes(), nframes)
            self.assertEqual(f.readframes(nframes), frames)

    def test_write_params(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.nframes, self.comptype, self.compname)
        f.close()

    def test_write_context_manager_calls_close(self):
        # Close checks for a minimum header and will raise an error
        # if it is not set, so this proves that close is called.
        with self.assertRaises(self.module.Error):
            with self.module.open(TESTFN, 'wb'):
                pass
        with self.assertRaises(self.module.Error):
            with open(TESTFN, 'wb') as testfile:
                with self.module.open(testfile):
                    pass

    def test_context_manager_with_open_file(self):
        """Leaving the context closes the writer; the caller's file object
        is only closed when the module owns it (close_fd)."""
        with open(TESTFN, 'wb') as testfile:
            with self.module.open(testfile) as f:
                f.setnchannels(self.nchannels)
                f.setsampwidth(self.sampwidth)
                f.setframerate(self.framerate)
                f.setcomptype(self.comptype, self.compname)
            self.assertEqual(testfile.closed, self.close_fd)
        with open(TESTFN, 'rb') as testfile:
            with self.module.open(testfile) as f:
                self.assertFalse(f.getfp().closed)
                params = f.getparams()
                self.assertEqual(params.nchannels, self.nchannels)
                self.assertEqual(params.sampwidth, self.sampwidth)
                self.assertEqual(params.framerate, self.framerate)
            if not self.close_fd:
                self.assertIsNone(f.getfp())
            self.assertEqual(testfile.closed, self.close_fd)

    def test_context_manager_with_filename(self):
        # If the file doesn't get closed, this test won't fail, but it will
        # produce a resource leak warning.
        with self.module.open(TESTFN, 'wb') as f:
            f.setnchannels(self.nchannels)
            f.setsampwidth(self.sampwidth)
            f.setframerate(self.framerate)
            f.setcomptype(self.comptype, self.compname)
        with self.module.open(TESTFN) as f:
            self.assertFalse(f.getfp().closed)
            params = f.getparams()
            self.assertEqual(params.nchannels, self.nchannels)
            self.assertEqual(params.sampwidth, self.sampwidth)
            self.assertEqual(params.framerate, self.framerate)
        if not self.close_fd:
            self.assertIsNone(f.getfp())

    def test_write(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)

    def test_write_bytearray(self):
        # writeframes must accept any bytes-like object, not just bytes
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(bytearray(self.frames))
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)

    def test_write_array(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(array.array('h', self.frames))
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)

    def test_write_memoryview(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(memoryview(self.frames))
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)

    def test_incompleted_write(self):
        """Writing fewer frames than declared: close() must patch the
        header to the real frame count, even at a nonzero file offset."""
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes + 1)
            f.writeframes(self.frames)
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_multiple_writes(self):
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes)
            framesize = self.nchannels * self.sampwidth
            f.writeframes(self.frames[:-framesize])
            f.writeframes(self.frames[-framesize:])
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_overflowed_write(self):
        """Writing more frames than declared: close() must patch the
        header up to the real frame count."""
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes - 1)
            f.writeframes(self.frames)
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_unseekable_read(self):
        with self.create_file(TESTFN) as f:
            f.setnframes(self.nframes)
            f.writeframes(self.frames)
        with UnseekableIO(TESTFN, 'rb') as testfile:
            self.check_file(testfile, self.nframes, self.frames)

    def test_unseekable_write(self):
        with UnseekableIO(TESTFN, 'wb') as testfile:
            with self.create_file(testfile) as f:
                f.setnframes(self.nframes)
                f.writeframes(self.frames)
        self.check_file(TESTFN, self.nframes, self.frames)

    def test_unseekable_incompleted_write(self):
        """On an unseekable file the header cannot be patched, so a short
        write leaves the declared (larger) frame count in place."""
        with UnseekableIO(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes + 1)
            try:
                f.writeframes(self.frames)
            except OSError:
                pass
            try:
                f.close()
            except OSError:
                pass
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes + 1, self.frames)

    def test_unseekable_overflowed_write(self):
        """On an unseekable file an overflowing write keeps the declared
        (smaller) frame count; the extra frames are truncated on read."""
        with UnseekableIO(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes - 1)
            try:
                f.writeframes(self.frames)
            except OSError:
                pass
            try:
                f.close()
            except OSError:
                pass
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            framesize = self.nchannels * self.sampwidth
            self.check_file(testfile, self.nframes - 1, self.frames[:-framesize])
class AudioTestsWithSourceFile(AudioTests):
    """Read-side tests against a reference sound file from the test data.

    Subclasses set ``sndfilename`` plus the expected parameters and frames.
    """

    @classmethod
    def setUpClass(cls):
        cls.sndfilepath = findfile(cls.sndfilename, subdir='audiodata')

    def test_read_params(self):
        f = self.f = self.module.open(self.sndfilepath)
        #self.assertEqual(f.getfp().name, self.sndfilepath)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.sndfilenframes, self.comptype, self.compname)

    def test_close(self):
        """close() on a reader/writer closes the caller's file object only
        when the module owns it (close_fd); a second close is a no-op."""
        with open(self.sndfilepath, 'rb') as testfile:
            f = self.f = self.module.open(testfile)
            self.assertFalse(testfile.closed)
            f.close()
            self.assertEqual(testfile.closed, self.close_fd)
        with open(TESTFN, 'wb') as testfile:
            fout = self.fout = self.module.open(testfile, 'wb')
            self.assertFalse(testfile.closed)
            with self.assertRaises(self.module.Error):
                fout.close()
            self.assertEqual(testfile.closed, self.close_fd)
            fout.close()  # do nothing

    def test_read(self):
        """Exercise readframes/rewind/tell/setpos, including rejection of
        out-of-range positions."""
        framesize = self.nchannels * self.sampwidth
        chunk1 = self.frames[:2 * framesize]
        chunk2 = self.frames[2 * framesize: 4 * framesize]
        f = self.f = self.module.open(self.sndfilepath)
        self.assertEqual(f.readframes(0), b'')
        self.assertEqual(f.tell(), 0)
        self.assertEqual(f.readframes(2), chunk1)
        f.rewind()
        pos0 = f.tell()
        self.assertEqual(pos0, 0)
        self.assertEqual(f.readframes(2), chunk1)
        pos2 = f.tell()
        self.assertEqual(pos2, 2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos0)
        self.assertEqual(f.readframes(2), chunk1)
        with self.assertRaises(self.module.Error):
            f.setpos(-1)
        with self.assertRaises(self.module.Error):
            f.setpos(f.getnframes() + 1)

    def test_copy(self):
        """Copy the source file in growing chunks and verify the copy has
        identical params and frame data."""
        f = self.f = self.module.open(self.sndfilepath)
        fout = self.fout = self.module.open(TESTFN, 'wb')
        fout.setparams(f.getparams())
        i = 0
        n = f.getnframes()
        while n > 0:
            i += 1
            fout.writeframes(f.readframes(i))
            n -= i
        fout.close()
        fout = self.fout = self.module.open(TESTFN, 'rb')
        f.rewind()
        self.assertEqual(f.getparams(), fout.getparams())
        self.assertEqual(f.readframes(f.getnframes()),
                         fout.readframes(fout.getnframes()))

    def test_read_not_from_start(self):
        """Reading works when the audio data starts at a nonzero offset in
        the underlying file."""
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            with open(self.sndfilepath, 'rb') as f:
                testfile.write(f.read())
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            with self.module.open(testfile, 'rb') as f:
                self.assertEqual(f.getnchannels(), self.nchannels)
                self.assertEqual(f.getsampwidth(), self.sampwidth)
                self.assertEqual(f.getframerate(), self.framerate)
                self.assertEqual(f.getnframes(), self.sndfilenframes)
                self.assertEqual(f.readframes(self.nframes), self.frames)
| lgpl-3.0 |
denovator/mochafac | lib/flask_wtf/i18n.py | 117 | 1720 | # coding: utf-8
"""
flask_wtf.i18n
~~~~~~~~~~~~~~
Internationalization support for Flask WTF.
:copyright: (c) 2013 by Hsiaoming Yang.
"""
from flask import _request_ctx_stack
from flask_babel import get_locale
from babel import support
try:
from wtforms.i18n import messages_path
except ImportError:
from wtforms.ext.i18n.utils import messages_path
__all__ = ('Translations', 'translations')
def _get_translations():
    """Return the wtforms gettext catalog for the current request.

    Adapted from flask-babel.  Yields ``None`` when there is no active
    request context or when Flask-Babel has not been initialised on the
    application; otherwise the loaded catalog is cached on the request
    context so it is built at most once per request.
    """
    request_ctx = _request_ctx_stack.top
    # Both preconditions must hold: an active request and the 'babel'
    # extension registered (get_locale needs it).
    if request_ctx is None or 'babel' not in request_ctx.app.extensions:
        return None
    cached = getattr(request_ctx, 'wtforms_translations', None)
    if cached is not None:
        return cached
    loaded = support.Translations.load(
        messages_path(), [get_locale()], domain='wtforms')
    request_ctx.wtforms_translations = loaded
    return loaded
class Translations(object):
    """Adapter exposing WTForms' translation hooks on top of the
    per-request gettext catalog returned by ``_get_translations``."""

    def gettext(self, string):
        """Translate *string*, falling back to the untranslated text."""
        catalog = _get_translations()
        if catalog is None:
            return string
        # Python 2 catalogs expose ugettext; Python 3 only has gettext.
        translate = getattr(catalog, 'ugettext', None) or catalog.gettext
        return translate(string)

    def ngettext(self, singular, plural, n):
        """Translate a singular/plural pair based on the count *n*."""
        catalog = _get_translations()
        if catalog is None:
            return singular if n == 1 else plural
        # Prefer the Python 2 unicode variant when available.
        translate = getattr(catalog, 'ungettext', None) or catalog.ngettext
        return translate(singular, plural, n)
translations = Translations()
| apache-2.0 |
AnimeshSinha1309/WebsiteEdunet | WebsiteEdunet/env/Lib/site-packages/django/contrib/gis/geos/collections.py | 292 | 4986 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
import json
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.libgeos import get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.utils.six.moves import range
class GeometryCollection(GEOSGeometry):
    """Heterogeneous GEOS geometry collection; base class for the
    Multi* geometry types below."""
    # GEOS geometry type id for GEOMETRYCOLLECTION.
    _typeid = 7
    def __init__(self, *args, **kwargs):
        "Initializes a Geometry Collection from a sequence of Geometry objects."
        # Checking the arguments
        if not args:
            raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
        if len(args) == 1:
            # If only one geometry provided or a list of geometries is provided
            # in the first argument.
            if isinstance(args[0], (tuple, list)):
                init_geoms = args[0]
            else:
                init_geoms = args
        else:
            init_geoms = args
        # Ensuring that only the permitted geometries are allowed in this collection
        # this is moved to list mixin super class
        self._check_allowed(init_geoms)
        # Creating the geometry pointer array.
        collection = self._create_collection(len(init_geoms), iter(init_geoms))
        super(GeometryCollection, self).__init__(collection, **kwargs)
    def __iter__(self):
        "Iterates over each Geometry in the Collection."
        for i in range(len(self)):
            yield self[i]
    def __len__(self):
        "Returns the number of geometries in this Collection."
        return self.num_geom
    # ### Methods for compatibility with ListMixin ###
    def _create_collection(self, length, items):
        # Creating the geometry pointer array.
        geoms = get_pointer_arr(length)
        for i, g in enumerate(items):
            # this is a little sloppy, but makes life easier
            # allow GEOSGeometry types (python wrappers) or pointer types
            geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
        return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
    def _get_single_internal(self, index):
        # Raw (borrowed) GEOS pointer to the index-th member geometry.
        return capi.get_geomn(self.ptr, index)
    def _get_single_external(self, index):
        "Returns the Geometry from this Collection at the given index (0-based)."
        # Checking the index and returning the corresponding GEOS geometry.
        # The clone gives the returned wrapper its own pointer to free.
        return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
    def _set_list(self, length, items):
        "Create a new collection, and destroy the contents of the previous pointer."
        prev_ptr = self.ptr
        srid = self.srid
        self.ptr = self._create_collection(length, items)
        if srid:
            self.srid = srid
        capi.destroy_geom(prev_ptr)
    _set_single = GEOSGeometry._set_single_rebuild
    _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
    @property
    def json(self):
        # GeoJSON: emit a true GeometryCollection for this class itself;
        # subclasses (Multi*) fall back to the generic GEOSGeometry json.
        if self.__class__.__name__ == 'GeometryCollection':
            return json.dumps({
                'type': self.__class__.__name__,
                'geometries': [
                    {'type': geom.__class__.__name__, 'coordinates': geom.coords}
                    for geom in self
                ],
            })
        return super(GeometryCollection, self).json
    geojson = json
    @property
    def kml(self):
        "Returns the KML for this Geometry Collection."
        return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)
    @property
    def tuple(self):
        "Returns a tuple of all the coordinates in this Geometry Collection"
        return tuple(g.tuple for g in self)
    coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
    """MULTIPOINT collection; only Point members are permitted
    (enforced via _check_allowed in GeometryCollection.__init__)."""
    _allowed = Point
    _typeid = 4
class MultiLineString(ProjectInterpolateMixin, GeometryCollection):
    """MULTILINESTRING collection; members must be line-like geometries."""
    _allowed = (LineString, LinearRing)
    _typeid = 5
    @property
    def merged(self):
        """
        Returns a LineString representing the line merge of this
        MultiLineString.
        """
        return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
    """MULTIPOLYGON collection; only Polygon members are permitted."""
    _allowed = Polygon
    _typeid = 6
    @property
    def cascaded_union(self):
        "Returns a cascaded union of this MultiPolygon."
        # Delegates to GEOS' cascaded-union operation via capi.
        return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
| mit |
mapr/hue | apps/spark/src/spark/data_export.py | 33 | 1578 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.encoding import smart_str
from desktop.lib import export_csvxls
LOG = logging.getLogger(__name__)
DL_FORMATS = [ 'csv', 'xls' ]
def download(api, session, cell, format):
  """Build an HTTP download response for a notebook cell's result.

  ``format`` must be one of DL_FORMATS ('csv' or 'xls'); anything else is
  logged and yields ``None``.
  """
  if format in DL_FORMATS:
    rows = SparkDataAdapter(api, session, cell)
    payload = export_csvxls.create_generator(rows, format)
    return export_csvxls.make_response(payload, format, 'script_result')
  LOG.error('Unknown download format "%s"' % format)
def SparkDataAdapter(api, session, cell):
  """Generator yielding a single (headers, rows) pair for a cell.

  Fetches the cell's output from the Livy API and digs out the
  'application/vnd.livy.table.v1+json' table payload.
  """
  result = api.fetch_data(session, cell)
  table = result['output']['data']['application/vnd.livy.table.v1+json']
  yield table['headers'], table['data']
| apache-2.0 |
ericvera/react-native | JSCLegacyProfiler/trace_data.py | 375 | 8013 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import unittest
"""
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<idle>-0 [001] ...2 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120
"""
TRACE_LINE_PATTERN = re.compile(
r'^\s*(?P<task>.+)-(?P<pid>\d+)\s+(?:\((?P<tgid>.+)\)\s+)?\[(?P<cpu>\d+)\]\s+(?:(?P<flags>\S{4})\s+)?(?P<timestamp>[0-9.]+):\s+(?P<function>.+)$')
"""
Example lines from custom app traces:
0: B|27295|providerRemove
0: E
tracing_mark_write: S|27311|NNFColdStart<D-7744962>|1112249168
"""
APP_TRACE_LINE_PATTERN = re.compile(
r'^(?P<type>.+?): (?P<args>.+)$')
"""
Example section names:
NNFColdStart
NNFColdStart<0><T7744962>
NNFColdStart<X>
NNFColdStart<T7744962>
"""
DECORATED_SECTION_NAME_PATTERN = re.compile(r'^(?P<section_name>.*?)(?:<0>)?(?:<(?P<command>.)(?P<argument>.*?)>)?$')
SYSTRACE_LINE_TYPES = set(['0', 'tracing_mark_write'])
class TraceLine(object):
    """One parsed line of an ftrace/systrace capture.

    ``function`` is either the raw function string or an AppTraceFunction
    when the payload was recognised as an app trace marker.  A canceled
    line renders as the empty string so it can be filtered out on output.
    """

    def __init__(self, task, pid, tgid, cpu, flags, timestamp, function):
        self.task = task
        self.pid = pid
        self.tgid = tgid
        self.cpu = cpu
        self.flags = flags
        self.timestamp = timestamp
        self.function = function
        self.canceled = False

    @property
    def is_app_trace_line(self):
        """True when the function payload was parsed into an app marker."""
        return isinstance(self.function, AppTraceFunction)

    def cancel(self):
        """Suppress this line: __str__ will return an empty string."""
        self.canceled = True

    def __str__(self):
        if self.canceled:
            return ""
        # Pick the template matching the fields this line actually has.
        if self.tgid:
            template = "{task:>16s}-{pid:<5d} ({tgid:5s}) [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n"
        elif self.flags:
            template = "{task:>16s}-{pid:<5d} [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n"
        else:
            template = "{task:>16s}-{pid:<5d} [{cpu:03d}] {timestamp:12.6f}: {function}\n"
        return template.format(**vars(self))
class AppTraceFunction(object):
    """Parsed payload of an app trace marker, e.g. ``B|27295|providerRemove``.

    ``args`` is the ``|``-split argument list: operation code, optional
    pid, optional (possibly decorated) section name.  ``pid`` is None when
    the pid field is absent or empty; ``cookie`` is None here and set by
    the AsyncTraceFunction subclass.
    """
    def __init__(self, type, args):
        self.type = type
        self.args = args
        self.operation = args[0]
        # Bug fix: pid was previously assigned only when a pid field was
        # present, so accessing .pid on e.g. a bare "E" event raised
        # AttributeError.  Default it to None unconditionally.
        self.pid = None
        if len(args) >= 2 and args[1]:
            self.pid = int(args[1])
        if len(args) >= 3:
            # Strip decorations (<command argument>) off the section name
            # and normalize args[2] so __str__ round-trips cleanly.
            self._section_name, self.command, self.argument = _parse_section_name(args[2])
            args[2] = self._section_name
        else:
            self._section_name = None
            self.command = None
            self.argument = None
        self.cookie = None
    @property
    def section_name(self):
        return self._section_name
    @section_name.setter
    def section_name(self, value):
        # Keep args in sync so the serialized form reflects the rename.
        self._section_name = value
        self.args[2] = value
    def __str__(self):
        return "{type}: {args}".format(type=self.type, args='|'.join(self.args))
class AsyncTraceFunction(AppTraceFunction):
    """App trace marker for async events, which carry a numeric cookie
    in the fourth ``|``-separated field (see TRACE_TYPE_MAP)."""
    def __init__(self, type, args):
        super(AsyncTraceFunction, self).__init__(type, args)
        self.cookie = int(args[3])
# Operation codes whose payload includes a cookie field are dispatched to
# AsyncTraceFunction; every other operation falls back to AppTraceFunction
# (see _parse_function).
TRACE_TYPE_MAP = {
    'S': AsyncTraceFunction,
    'T': AsyncTraceFunction,
    'F': AsyncTraceFunction,
}
def parse_line(line):
    """Parse one raw trace line into a TraceLine.

    Returns None when the line does not match the ftrace line format.
    The function payload is upgraded to an AppTraceFunction when it is a
    recognised app trace marker; otherwise the raw string is kept.
    """
    match = TRACE_LINE_PATTERN.match(line.strip())
    if match is None:
        return None
    fields = match.groupdict()
    function = fields["function"]
    parsed = _parse_function(function)
    if parsed:
        function = parsed
    return TraceLine(
        fields["task"],
        int(fields["pid"]),
        fields["tgid"],
        int(fields["cpu"]),
        fields["flags"],
        float(fields["timestamp"]),
        function,
    )
def parse_dextr_line(line):
    """Convert one dextr JSON trace record (a dict) into a TraceLine."""
    task = line["name"]
    pid = line["pid"]
    tgid = line["tid"]
    # dextr records carry no CPU or irq-flag information.
    cpu = None
    flags = None
    timestamp = line["ts"]
    function = AppTraceFunction("DextrTrace", [line["ph"], line["pid"], line["name"]])
    return TraceLine(task, pid, tgid, cpu, flags, timestamp, function)
def _parse_function(function):
    """Parse a trace function payload into an App/AsyncTraceFunction.

    Returns None when the payload is not an app trace marker (wrong line
    type, no match, or an empty argument list).
    """
    line_match = APP_TRACE_LINE_PATTERN.match(function)
    if not line_match:
        return None
    type = line_match.group("type")
    if type not in SYSTRACE_LINE_TYPES:
        return None
    args = line_match.group("args").split('|')
    # Bug fix: an empty argument payload used to be replaced with None and
    # then fall through to ``args[0]`` below, raising TypeError.  Treat it
    # as unparseable instead.
    if len(args) == 1 and len(args[0]) == 0:
        return None
    constructor = TRACE_TYPE_MAP.get(args[0], AppTraceFunction)
    return constructor(type, args)
def _parse_section_name(section_name):
    """Split a decorated section name into (name, command, argument).

    e.g. 'NNFColdStart<T7744962>' -> ('NNFColdStart', 'T', '7744962').
    A bare name yields (name, None, None); None passes straight through.
    """
    if section_name is None:
        return section_name, None, None
    # DECORATED_SECTION_NAME_PATTERN matches any string, so match() never
    # returns None here.
    section_name_match = DECORATED_SECTION_NAME_PATTERN.match(section_name)
    section_name = section_name_match.group("section_name")
    command = section_name_match.group("command")
    argument = section_name_match.group("argument")
    return section_name, command, argument
def _format_section_name(section_name, command, argument):
if not command:
return section_name
return "{section_name}<{command}{argument}>".format(**vars())
class RoundTripFormattingTests(unittest.TestCase):
    """Round-trip checks: parsing then re-formatting a section name, app
    trace function, or full trace line must reproduce the input."""
    def testPlainSectionName(self):
        section_name = "SectionName12345-5562342fas"
        self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name)))
    def testDecoratedSectionName(self):
        section_name = "SectionName12345-5562342fas<D-123456>"
        self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name)))
    def testSimpleFunction(self):
        function = "0: E"
        self.assertEqual(function, str(_parse_function(function)))
    def testFunctionWithoutCookie(self):
        function = "0: B|27295|providerRemove"
        self.assertEqual(function, str(_parse_function(function)))
    def testFunctionWithCookie(self):
        function = "0: S|27311|NNFColdStart|1112249168"
        self.assertEqual(function, str(_parse_function(function)))
    def testFunctionWithCookieAndArgs(self):
        function = "0: T|27311|NNFColdStart|1122|Start"
        self.assertEqual(function, str(_parse_function(function)))
    def testFunctionWithArgsButNoPid(self):
        function = "0: E|||foo=bar"
        self.assertEqual(function, str(_parse_function(function)))
    def testKitKatFunction(self):
        # KitKat uses the 'tracing_mark_write:' prefix instead of '0:'.
        function = "tracing_mark_write: B|14127|Looper.dispatchMessage|arg=>>>>> Dispatching to Handler (android.os.Handler) {422ae980} null: 0|Java"
        self.assertEqual(function, str(_parse_function(function)))
    def testNonSysTraceFunctionIgnored(self):
        # Kernel events are not app trace markers; the parser returns None.
        function = "sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120"
        self.assertEqual(None, _parse_function(function))
    def testLineWithFlagsAndTGID(self):
        line = " <idle>-0 ( 550) [000] d..2 7953.258473: cpu_idle: state=1 cpu_id=0\n"
        self.assertEqual(line, str(parse_line(line)))
    def testLineWithFlagsAndNoTGID(self):
        line = " <idle>-0 (-----) [000] d..2 7953.258473: cpu_idle: state=1 cpu_id=0\n"
        self.assertEqual(line, str(parse_line(line)))
    def testLineWithFlags(self):
        line = " <idle>-0 [001] ...2 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n"
        self.assertEqual(line, str(parse_line(line)))
    def testLineWithoutFlags(self):
        line = " <idle>-0 [001] 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n"
        self.assertEqual(line, str(parse_line(line)))
| bsd-3-clause |
xbezdick/tempest | tempest/api/compute/admin/test_fixed_ips.py | 13 | 2319 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class FixedIPsTestJson(base.BaseV2ComputeAdminTest):
    """Admin API tests for fixed-IP show/reserve operations.

    These endpoints are nova-network only, so the whole class is skipped
    when neutron is the deployed network service.
    """
    @classmethod
    def skip_checks(cls):
        super(FixedIPsTestJson, cls).skip_checks()
        if CONF.service_available.neutron:
            msg = ("%s skipped as neutron is available" % cls.__name__)
            raise cls.skipException(msg)
    @classmethod
    def setup_clients(cls):
        super(FixedIPsTestJson, cls).setup_clients()
        cls.client = cls.os_adm.fixed_ips_client
    @classmethod
    def resource_setup(cls):
        # Boot a server and remember its first fixed IP; all tests below
        # operate on that single address.
        super(FixedIPsTestJson, cls).resource_setup()
        server = cls.create_test_server(wait_until='ACTIVE')
        server = cls.servers_client.show_server(server['id'])['server']
        for ip_set in server['addresses']:
            for ip in server['addresses'][ip_set]:
                if ip['OS-EXT-IPS:type'] == 'fixed':
                    cls.ip = ip['addr']
                    break
            if cls.ip:
                break
    @test.idempotent_id('16b7d848-2f7c-4709-85a3-2dfb4576cc52')
    @test.services('network')
    def test_list_fixed_ip_details(self):
        fixed_ip = self.client.show_fixed_ip(self.ip)
        self.assertEqual(fixed_ip['fixed_ip']['address'], self.ip)
    @test.idempotent_id('5485077b-7e46-4cec-b402-91dc3173433b')
    @test.services('network')
    def test_set_reserve(self):
        self.client.reserve_fixed_ip(self.ip, reserve="None")
    @test.idempotent_id('7476e322-b9ff-4710-bf82-49d51bac6e2e')
    @test.services('network')
    def test_set_unreserve(self):
        self.client.reserve_fixed_ip(self.ip, unreserve="None")
| apache-2.0 |
alibarkatali/module_web | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Detection model for Bulgarian text encoded as ISO-8859-5: maps bytes to
# frequency-order indices and scores character-pair sequences with the
# shared BulgarianLangModel matrix.
Latin5BulgarianModel = {
    'charToOrderMap': Latin5_BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}
# Same language model applied to the windows-1251 byte-to-order mapping;
# only the charToOrderMap and charset name differ from the Latin5 model.
Win1251BulgarianModel = {
    'charToOrderMap': win1251BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}
# flake8: noqa
| mit |
HybridF5/jacket | jacket/compute/scheduler/rpcapi.py | 1 | 6071 | # Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the scheduler manager RPC API.
"""
import oslo_messaging as messaging
import jacket.compute.conf
from jacket.objects.compute import base as objects_base
from jacket import rpc
CONF = jacket.compute.conf.CONF
class SchedulerAPI(object):
    '''Client side of the scheduler rpc API.

    API version history:

        * 1.0 - Initial version.
        * 1.1 - Changes to prep_resize():
            * remove instance_uuid, add instance
            * remove instance_type_id, add instance_type
            * remove topic, it was unused
        * 1.2 - Remove topic from run_instance, it was unused
        * 1.3 - Remove instance_id, add instance to live_migration
        * 1.4 - Remove update_db from prep_resize
        * 1.5 - Add reservations argument to prep_resize()
        * 1.6 - Remove reservations argument to run_instance()
        * 1.7 - Add create_volume() method, remove topic from live_migration()

        * 2.0 - Remove 1.x backwards compat
        * 2.1 - Add image_id to create_volume()
        * 2.2 - Remove reservations argument to create_volume()
        * 2.3 - Remove create_volume()
        * 2.4 - Change update_service_capabilities()
            * accepts a list of capabilities
        * 2.5 - Add get_backdoor_port()
        * 2.6 - Add select_hosts()

        ... Grizzly supports message version 2.6.  So, any changes to
        existing methods in 2.x after that point should be done such that
        they can handle the version_cap being set to 2.6.

        * 2.7 - Add select_destinations()
        * 2.8 - Deprecate prep_resize() -- JUST KIDDING.  It is still used
                by the compute manager for retries.
        * 2.9 - Added the legacy_bdm_in_spec parameter to run_instance()

        ... Havana supports message version 2.9.  So, any changes to
        existing methods in 2.x after that point should be done such that
        they can handle the version_cap being set to 2.9.

        * Deprecated live_migration() call, moved to conductor
        * Deprecated select_hosts()

        3.0 - Removed backwards compat

        ... Icehouse and Juno support message version 3.0.  So, any changes
        to existing methods in 3.x after that point should be done such
        that they can handle the version_cap being set to 3.0.

        * 3.1 - Made select_destinations() send flavor object

        * 4.0 - Removed backwards compat for Icehouse
        * 4.1 - Add update_aggregates() and delete_aggregate()
        * 4.2 - Added update_instance_info(), delete_instance_info(), and
                sync_instance_info() methods

        ... Kilo and Liberty support message version 4.2.  So, any changes
        to existing methods in 4.x after that point should be done such
        that they can handle the version_cap being set to 4.2.

        * 4.3 - Modify select_destinations() signature by providing a
                RequestSpec obj
    '''

    # Map release names to the highest message version each one supports;
    # honours [upgrade_levels] scheduler=<release> in configuration.
    VERSION_ALIASES = {
        'grizzly': '2.6',
        'havana': '2.9',
        'icehouse': '3.0',
        'juno': '3.0',
        'kilo': '4.2',
        'liberty': '4.2',
    }

    def __init__(self):
        super(SchedulerAPI, self).__init__()
        target = messaging.Target(topic=CONF.scheduler_topic, version='4.0')
        # The configured upgrade level is either a release alias or an
        # explicit version string; fall through to the raw value when it
        # is not a known alias.
        upgrade_level = CONF.upgrade_levels.scheduler
        version_cap = self.VERSION_ALIASES.get(upgrade_level, upgrade_level)
        self.client = rpc.get_client(
            target, version_cap=version_cap,
            serializer=objects_base.NovaObjectSerializer())

    def select_destinations(self, ctxt, spec_obj):
        """Ask the scheduler to pick destinations for a request.

        Sends the RequestSpec object directly on 4.3; for older peers it
        is down-converted to the legacy dict forms.
        """
        if self.client.can_send_version('4.3'):
            version = '4.3'
            msg_args = {'spec_obj': spec_obj}
        else:
            version = '4.0'
            msg_args = {
                'request_spec': spec_obj.to_legacy_request_spec_dict(),
                'filter_properties':
                    spec_obj.to_legacy_filter_properties_dict(),
            }
        return self.client.prepare(version=version).call(
            ctxt, 'select_destinations', **msg_args)

    def update_aggregates(self, ctxt, aggregates):
        # NOTE(sbauza): Yes, it's a fanout, we need to update all schedulers
        self.client.prepare(fanout=True, version='4.1').cast(
            ctxt, 'update_aggregates', aggregates=aggregates)

    def delete_aggregate(self, ctxt, aggregate):
        # NOTE(sbauza): Yes, it's a fanout, we need to update all schedulers
        self.client.prepare(fanout=True, version='4.1').cast(
            ctxt, 'delete_aggregate', aggregate=aggregate)

    def update_instance_info(self, ctxt, host_name, instance_info):
        # Fanout cast: every scheduler keeps its own instance-info cache.
        return self.client.prepare(version='4.2', fanout=True).cast(
            ctxt, 'update_instance_info', host_name=host_name,
            instance_info=instance_info)

    def delete_instance_info(self, ctxt, host_name, instance_uuid):
        # Fanout cast: every scheduler keeps its own instance-info cache.
        return self.client.prepare(version='4.2', fanout=True).cast(
            ctxt, 'delete_instance_info', host_name=host_name,
            instance_uuid=instance_uuid)

    def sync_instance_info(self, ctxt, host_name, instance_uuids):
        # Fanout cast: every scheduler keeps its own instance-info cache.
        return self.client.prepare(version='4.2', fanout=True).cast(
            ctxt, 'sync_instance_info', host_name=host_name,
            instance_uuids=instance_uuids)
| apache-2.0 |
haad/ansible | lib/ansible/plugins/lookup/inventory_hostnames.py | 57 | 2275 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Steven Dossett <sdossett@panath.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Plugin documentation consumed by ansible-doc; fixed typos in the
# user-facing text ("bye" -> "by", "only worth for" -> "only worthwhile
# for").
DOCUMENTATION = """
    lookup: inventory_hostnames
    author:
      - Michael DeHaan <michael.dehaan@gmail.com>
      - Steven Dossett <sdossett@panath.com>
    version_added: "1.3"
    short_description: list of inventory hosts matching a host pattern
    description:
      - "This lookup understands 'host patterns' as used by the `hosts:` keyword in plays
         and can return a list of matching hosts from inventory"
    notes:
      - this is only worthwhile for 'hostname patterns'; it is easier to loop over the group/group_names variables otherwise.
"""

EXAMPLES = """
- name: show all the hosts matching the pattern, ie all but the group www
  debug:
    msg: "{{ item }}"
  with_inventory_hostnames:
    - all:!www
"""

RETURN = """
 _hostnames:
    description: list of hostnames that matched the host pattern in inventory
    type: list
"""
from ansible.inventory.manager import split_host_pattern, order_patterns
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):

    def get_hosts(self, variables, pattern):
        """Resolve one pattern token against inventory.

        Strips a leading '!' or '&' operator, then returns the members of
        the named group, or the name itself as a one-element list when it
        is a host in the 'all' group, or an empty list otherwise.
        """
        name = pattern[1:] if pattern[0] in ('!', '&') else pattern
        groups = variables['groups']
        if name in groups:
            return groups[name]
        if name in groups['all']:
            return [name]
        return []

    def run(self, terms, variables=None, **kwargs):
        """Expand host patterns into the list of matching inventory hosts."""
        selected = []
        for term in terms:
            # Evaluate unions first, then intersections ('&'), then
            # exclusions ('!'), mirroring play-level pattern semantics.
            for pattern in order_patterns(split_host_pattern(term)):
                matched = self.get_hosts(variables, pattern)
                if pattern.startswith("!"):
                    selected = [h for h in selected if h not in matched]
                elif pattern.startswith("&"):
                    selected = [h for h in selected if h in matched]
                else:
                    selected.extend(matched)
        # return unique list
        return list(set(selected))
| gpl-3.0 |
gbenson/i8c | tests/test_i8cfail_0003.py | 1 | 1875 | # -*- coding: utf-8 -*-
# Copyright (C) 2015-16 Red Hat, Inc.
# This file is part of the Infinity Note Compiler.
#
# The Infinity Note Compiler is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The Infinity Note Compiler is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infinity Note Compiler. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from tests import TestCase
from i8c.compiler import StackError
# All binary operations were considered equal to each other: the "mul"
# and "add" were considered equivalent and the branch optimized away.
# The note below therefore has two arms that end in *different* binary
# operations ("mul" vs "add"); a correct compiler must keep the branch.
SOURCE = """\
define test::i8cfail_0003 returns int
argument int y
argument int z
load 0
beq label
load 0
mul
return
label:
load 1
add
"""
class TestI8CFail0003(TestCase):
    def test_i8cfail_0003(self):
        """Miscellaneous I8C failure #0003 check"""
        tree, output = self.compile(SOURCE)
        opnames = output.opnames
        # Robust check: a branch instruction must survive in the stream.
        self.assertIn("bra", opnames)
        # Fragile check: pin the exact opcode sequence.  This may need
        # updating to cope with future compiler changes.
        expected = ["bra",
                    "plus_uconst", "skip",
                    "lit0", "mul"]
        self.assertEqual(expected, opnames)
| lgpl-2.1 |
alrifqi/django | tests/model_formsets/tests.py | 192 | 74306 | from __future__ import unicode_literals
import datetime
import re
from datetime import date
from decimal import Decimal
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.forms.models import (
BaseModelFormSet, _get_foreign_key, inlineformset_factory,
modelformset_factory,
)
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
AlternateBook, Author, AuthorMeeting, BetterAuthor, Book, BookWithCustomPK,
BookWithOptionalAltEditor, ClassyMexicanRestaurant, CustomPrimaryKey,
Location, Membership, MexicanRestaurant, Owner, OwnerProfile, Person,
Place, Player, Poem, Poet, Post, Price, Product, Repository, Restaurant,
Revision, Team,
)
class DeletionTests(TestCase):
    # These tests pin the exact interplay between formset deletion,
    # validation, and save()/save(commit=False); the call ordering inside
    # each test is itself part of what is being verified.

    def test_deletion(self):
        """A form marked DELETE removes its object on save(), but
        save(commit=False) must leave the database untouched."""
        PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
        poet = Poet.objects.create(name='test')
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '1',
            'form-MAX_NUM_FORMS': '0',
            'form-0-id': str(poet.pk),
            'form-0-name': 'test',
            'form-0-DELETE': 'on',
        }
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        # commit=False: the deletion is deferred, the row must still exist.
        formset.save(commit=False)
        self.assertEqual(Poet.objects.count(), 1)
        # A full save() performs the deletion.
        formset.save()
        self.assertTrue(formset.is_valid())
        self.assertEqual(Poet.objects.count(), 0)

    def test_add_form_deletion_when_invalid(self):
        """
        Make sure that an add form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
        poet = Poet.objects.create(name='test')
        # One existing untouched and two new invalid forms
        data = {
            'form-TOTAL_FORMS': '3',
            'form-INITIAL_FORMS': '1',
            'form-MAX_NUM_FORMS': '0',
            'form-0-id': six.text_type(poet.id),
            'form-0-name': 'test',
            'form-1-id': '',
            'form-1-name': 'x' * 1000,  # Too long
            'form-2-id': six.text_type(poet.id),  # Violate unique constraint
            'form-2-name': 'test2',
        }
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poet.objects.count(), 1)
        # Then make sure that it *does* pass validation and delete the object,
        # even though the data in new forms aren't actually valid.
        data['form-0-DELETE'] = 'on'
        data['form-1-DELETE'] = 'on'
        data['form-2-DELETE'] = 'on'
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poet.objects.count(), 0)

    def test_change_form_deletion_when_invalid(self):
        """
        Make sure that a change form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
        poet = Poet.objects.create(name='test')
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '1',
            'form-MAX_NUM_FORMS': '0',
            'form-0-id': six.text_type(poet.id),
            # Over the max_length of the name field, so the form is invalid.
            'form-0-name': 'x' * 1000,
        }
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poet.objects.count(), 1)
        # Then make sure that it *does* pass validation and delete the object,
        # even though the data isn't actually valid.
        data['form-0-DELETE'] = 'on'
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poet.objects.count(), 0)

    def test_outdated_deletion(self):
        """Deleting a form whose pk is no longer in the database must not
        invalidate the formset -- the object is gone either way."""
        poet = Poet.objects.create(name='test')
        poem = Poem.objects.create(name='Brevity is the soul of wit', poet=poet)
        PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", can_delete=True)
        # Simulate deletion of an object that doesn't exist in the database
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '2',
            'form-0-id': str(poem.pk),
            'form-0-name': 'foo',
            'form-1-id': str(poem.pk + 1),  # doesn't exist
            'form-1-name': 'bar',
            'form-1-DELETE': 'on',
        }
        formset = PoemFormSet(data, instance=poet, prefix="form")
        # The formset is valid even though poem.pk + 1 doesn't exist,
        # because it's marked for deletion anyway
        self.assertTrue(formset.is_valid())
        formset.save()
        # Make sure the save went through correctly
        self.assertEqual(Poem.objects.get(pk=poem.pk).name, "foo")
        self.assertEqual(poet.poem_set.count(), 1)
        self.assertFalse(Poem.objects.filter(pk=poem.pk + 1).exists())
class ModelFormsetTest(TestCase):
def test_modelformset_factory_without_fields(self):
""" Regression for #19733 """
message = (
"Calling modelformset_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelformset_factory(Author)
def test_simple_save(self):
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /><input type="hidden" name="form-0-id" id="id_form-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /><input type="hidden" name="form-1-id" id="id_form-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Charles Baudelaire',
'form-1-name': 'Arthur Rimbaud',
'form-2-name': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
author1, author2 = saved
self.assertEqual(author1, Author.objects.get(name='Charles Baudelaire'))
self.assertEqual(author2, Author.objects.get(name='Arthur Rimbaud'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1])
# Gah! We forgot Paul Verlaine. Let's create a formset to edit the
# existing authors with an extra form to add him. We *could* pass in a
# queryset to restrict the Author objects we edit, but in this case
# we'll use it to display them in alphabetical order by name.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=False)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '2', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-name': 'Paul Verlaine',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# Only changed or new objects are returned from formset.save()
saved = formset.save()
self.assertEqual(len(saved), 1)
author3 = saved[0]
self.assertEqual(author3, Author.objects.get(name='Paul Verlaine'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# This probably shouldn't happen, but it will. If an add form was
# marked for deletion, make sure we don't save that form.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=True)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /></p>\n'
'<p><label for="id_form-0-DELETE">Delete:</label> <input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /></p>\n'
'<p><label for="id_form-1-DELETE">Delete:</label> <input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" value="Paul Verlaine" maxlength="100" /></p>\n'
'<p><label for="id_form-2-DELETE">Delete:</label> <input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE" /><input type="hidden" name="form-2-id" value="%d" id="id_form-2-id" /></p>' % author3.id)
self.assertHTMLEqual(formset.forms[3].as_p(),
'<p><label for="id_form-3-name">Name:</label> <input id="id_form-3-name" type="text" name="form-3-name" maxlength="100" /></p>\n'
'<p><label for="id_form-3-DELETE">Delete:</label> <input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE" /><input type="hidden" name="form-3-id" id="id_form-3-id" /></p>')
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': 'Walt Whitman',
'form-3-DELETE': 'on',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# No objects were changed or saved so nothing will come back.
self.assertEqual(formset.save(), [])
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# Let's edit a record to ensure save only returns that one record.
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Walt Whitman',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': '',
'form-3-DELETE': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# One record has changed.
saved = formset.save()
self.assertEqual(len(saved), 1)
self.assertEqual(saved[0], Author.objects.get(name='Walt Whitman'))
def test_commit_false(self):
# Test the behavior of commit=False and save_m2m
author1 = Author.objects.create(name='Charles Baudelaire')
author2 = Author.objects.create(name='Paul Verlaine')
author3 = Author.objects.create(name='Walt Whitman')
meeting = AuthorMeeting.objects.create(created=date.today())
meeting.authors = Author.objects.all()
# create an Author instance to add to the meeting.
author4 = Author.objects.create(name='John Steinbeck')
AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, fields="__all__", extra=1, can_delete=True)
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(meeting.id),
'form-0-name': '2nd Tuesday of the Week Meeting',
'form-0-authors': [author2.id, author1.id, author3.id, author4.id],
'form-1-name': '',
'form-1-authors': '',
'form-1-DELETE': '',
}
formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
self.assertTrue(formset.is_valid())
instances = formset.save(commit=False)
for instance in instances:
instance.created = date.today()
instance.save()
formset.save_m2m()
self.assertQuerysetEqual(instances[0].authors.all(), [
'<Author: Charles Baudelaire>',
'<Author: John Steinbeck>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_max_num(self):
# Test the behavior of max_num with model formsets. It should allow
# all existing related objects/inlines for a given object to be
# displayed, but not allow the creation of new inlines beyond max_num.
Author.objects.create(name='Charles Baudelaire')
Author.objects.create(name='Paul Verlaine')
Author.objects.create(name='Walt Whitman')
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 6)
self.assertEqual(len(formset.extra_forms), 3)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertEqual(len(formset.extra_forms), 1)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(len(formset.extra_forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_min_num(self):
# Test the behavior of min_num with model formsets. It should be
# added to extra.
qs = Author.objects.none()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 2)
def test_min_num_with_existing(self):
# Test the behavior of min_num with existing objects.
Author.objects.create(name='Charles Baudelaire')
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0, min_num=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
def test_custom_save_method(self):
class PoetForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Vladimir Mayakovsky" just to be a jerk.
author = super(PoetForm, self).save(commit=False)
author.name = "Vladimir Mayakovsky"
if commit:
author.save()
return author
PoetFormSet = modelformset_factory(Poet, fields="__all__", form=PoetForm)
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Walt Whitman',
'form-1-name': 'Charles Baudelaire',
'form-2-name': '',
}
qs = Poet.objects.all()
formset = PoetFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
poets = formset.save()
self.assertEqual(len(poets), 2)
poet1, poet2 = poets
self.assertEqual(poet1.name, 'Vladimir Mayakovsky')
self.assertEqual(poet2.name, 'Vladimir Mayakovsky')
def test_custom_form(self):
""" Test that model_formset respects fields and exclude parameters of
custom form
"""
class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'posted')
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = ('subtitle',)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertNotIn("subtitle", formset.forms[0].fields)
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertNotIn("subtitle", formset.forms[0].fields)
def test_custom_queryset_init(self):
"""
Test that a queryset can be overridden in the __init__ method.
https://docs.djangoproject.com/en/dev/topics/forms/modelforms/#changing-the-queryset
"""
Author.objects.create(name='Charles Baudelaire')
Author.objects.create(name='Paul Verlaine')
class BaseAuthorFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super(BaseAuthorFormSet, self).__init__(*args, **kwargs)
self.queryset = Author.objects.filter(name__startswith='Charles')
AuthorFormSet = modelformset_factory(Author, fields='__all__', formset=BaseAuthorFormSet)
formset = AuthorFormSet()
self.assertEqual(len(formset.get_queryset()), 1)
def test_model_inheritance(self):
BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__")
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="number" name="form-0-write_speed" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '1', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': '',
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
author1, = saved
self.assertEqual(author1, BetterAuthor.objects.get(name='Ernest Hemingway'))
hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="number" name="form-0-write_speed" value="10" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" value="%d" id="id_form-0-author_ptr" /></p>' % hemingway_id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /></p>\n'
'<p><label for="id_form-1-write_speed">Write speed:</label> <input type="number" name="form-1-write_speed" id="id_form-1-write_speed" /><input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': hemingway_id,
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
'form-1-author_ptr': '',
'form-1-name': '',
'form-1-write_speed': '',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
self.assertEqual(formset.save(), [])
def test_inline_formsets(self):
# We can also create a formset that is tied to a parent model. This is
# how the admin system's edit inline functionality works.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3, fields="__all__")
author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" id="id_book_set-0-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '0', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': '',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1, Book.objects.get(title='Les Fleurs du Mal'))
self.assertQuerysetEqual(author.book_set.all(), ['<Book: Les Fleurs du Mal>'])
# Now that we've added a book to Charles Baudelaire, let's try adding
# another one. This time though, an edit form will be available for
# every existing book.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
author = Author.objects.get(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="%d" id="id_book_set-0-id" /></p>' % (author.id, book1.id))
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book2, = saved
self.assertEqual(book2, Book.objects.get(title='Les Paradis Artificiels'))
# As you can see, 'Les Paradis Artificiels' is now a book belonging to
# Charles Baudelaire.
self.assertQuerysetEqual(author.book_set.order_by('title'), [
'<Book: Les Fleurs du Mal>',
'<Book: Les Paradis Artificiels>',
])
def test_inline_formsets_save_as_new(self):
    # The save_as_new parameter lets you re-associate the data to a new
    # instance. This is used in the admin for save_as functionality.
    AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
    Author.objects.create(name='Charles Baudelaire')
    # NOTE: the submitted ids ('1', '2') refer to books that were never
    # created; with save_as_new=True existing pks are discarded and new
    # rows are created, so the values never hit the database.
    data = {
        'book_set-TOTAL_FORMS': '3', # the number of forms rendered
        'book_set-INITIAL_FORMS': '2', # the number of forms with initial data
        'book_set-MAX_NUM_FORMS': '', # the max number of forms
        'book_set-0-id': '1',
        'book_set-0-title': 'Les Fleurs du Mal',
        'book_set-1-id': '2',
        'book_set-1-title': 'Les Paradis Artificiels',
        'book_set-2-title': '',
    }
    # Validation succeeds even against an unsaved parent instance.
    formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
    self.assertTrue(formset.is_valid())
    new_author = Author.objects.create(name='Charles Baudelaire')
    formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
    saved = formset.save()
    # Both filled-in forms produce new books attached to new_author; the
    # blank third form is skipped.
    self.assertEqual(len(saved), 2)
    book1, book2 = saved
    self.assertEqual(book1.title, 'Les Fleurs du Mal')
    self.assertEqual(book2.title, 'Les Paradis Artificiels')
    # Test using a custom prefix on an inline formset.
    formset = AuthorBooksFormSet(prefix="test")
    self.assertEqual(len(formset.forms), 2)
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_test-0-title">Title:</label> <input id="id_test-0-title" type="text" name="test-0-title" maxlength="100" /><input type="hidden" name="test-0-author" id="id_test-0-author" /><input type="hidden" name="test-0-id" id="id_test-0-id" /></p>')
    self.assertHTMLEqual(formset.forms[1].as_p(),
        '<p><label for="id_test-1-title">Title:</label> <input id="id_test-1-title" type="text" name="test-1-title" maxlength="100" /><input type="hidden" name="test-1-author" id="id_test-1-author" /><input type="hidden" name="test-1-id" id="id_test-1-id" /></p>')
def test_inline_formsets_with_custom_pk(self):
    # Test inline formsets where the inline-edited object has a custom
    # primary key that is not the fk to the parent object.
    self.maxDiff = 1024
    AuthorBooksFormSet2 = inlineformset_factory(Author, BookWithCustomPK, can_delete=False, extra=1, fields="__all__")
    author = Author.objects.create(pk=1, name='Charles Baudelaire')
    formset = AuthorBooksFormSet2(instance=author)
    self.assertEqual(len(formset.forms), 1)
    # The custom pk (my_pk) is rendered as an editable field; the fk to
    # the parent is a hidden input pre-filled with the parent's pk.
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label> <input id="id_bookwithcustompk_set-0-my_pk" type="number" name="bookwithcustompk_set-0-my_pk" step="1" /></p>\n'
        '<p><label for="id_bookwithcustompk_set-0-title">Title:</label> <input id="id_bookwithcustompk_set-0-title" type="text" name="bookwithcustompk_set-0-title" maxlength="100" /><input type="hidden" name="bookwithcustompk_set-0-author" value="1" id="id_bookwithcustompk_set-0-author" /></p>')
    data = {
        'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered
        'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data
        'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms
        'bookwithcustompk_set-0-my_pk': '77777',
        'bookwithcustompk_set-0-title': 'Les Fleurs du Mal',
    }
    formset = AuthorBooksFormSet2(data, instance=author)
    self.assertTrue(formset.is_valid())
    saved = formset.save()
    self.assertEqual(len(saved), 1)
    book1, = saved
    # The user-supplied pk is honored and the row is linked to the parent.
    self.assertEqual(book1.pk, 77777)
    book1 = author.bookwithcustompk_set.get()
    self.assertEqual(book1.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_multi_table_inheritance(self):
    # Test inline formsets where the inline-edited object uses multi-table
    # inheritance, thus has a non AutoField yet auto-created primary key.
    AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1, fields="__all__")
    author = Author.objects.create(pk=1, name='Charles Baudelaire')
    formset = AuthorBooksFormSet3(instance=author)
    self.assertEqual(len(formset.forms), 1)
    # The parent-link pk (book_ptr) is rendered as a hidden input, just
    # like a regular auto pk would be.
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_alternatebook_set-0-title">Title:</label> <input id="id_alternatebook_set-0-title" type="text" name="alternatebook_set-0-title" maxlength="100" /></p>\n'
        '<p><label for="id_alternatebook_set-0-notes">Notes:</label> <input id="id_alternatebook_set-0-notes" type="text" name="alternatebook_set-0-notes" maxlength="100" /><input type="hidden" name="alternatebook_set-0-author" value="1" id="id_alternatebook_set-0-author" /><input type="hidden" name="alternatebook_set-0-book_ptr" id="id_alternatebook_set-0-book_ptr" /></p>')
    data = {
        'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered
        'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data
        'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms
        'alternatebook_set-0-title': 'Flowers of Evil',
        'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal'
    }
    formset = AuthorBooksFormSet3(data, instance=author)
    self.assertTrue(formset.is_valid())
    saved = formset.save()
    self.assertEqual(len(saved), 1)
    book1, = saved
    self.assertEqual(book1.title, 'Flowers of Evil')
    self.assertEqual(book1.notes, 'English translation of Les Fleurs du Mal')
@skipUnlessDBFeature('supports_partially_nullable_unique_constraints')
def test_inline_formsets_with_nullable_unique_together(self):
    """
    Inline formsets validate when the inline-edited model has a
    unique_together constraint with a nullable member and that member is
    left blank on every form.
    """
    BookFormSet = inlineformset_factory(Author, BookWithOptionalAltEditor, can_delete=False, extra=2, fields="__all__")
    author = Author.objects.create(pk=1, name='Charles Baudelaire')
    payload = {
        'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered
        'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data
        'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms
        'bookwithoptionalalteditor_set-0-author': '1',
        'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal',
        'bookwithoptionalalteditor_set-1-author': '1',
        'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal',
    }
    bound = BookFormSet(payload, instance=author)
    self.assertTrue(bound.is_valid())
    books = bound.save()
    # Both duplicate-titled books save: the nullable member of the
    # unique_together constraint is empty, so the constraint is not
    # considered violated.
    self.assertEqual(len(books), 2)
    for book in books:
        self.assertEqual(book.author_id, 1)
        self.assertEqual(book.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_custom_save_method(self):
    # An inline formset can use a ModelForm subclass whose save() mutates
    # the instance, and it can be fed a custom queryset.
    AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
    author = Author.objects.create(pk=1, name='Charles Baudelaire')
    book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
    book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
    book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
    class PoemForm(forms.ModelForm):
        def save(self, commit=True):
            # change the name to "Brooklyn Bridge" just to be a jerk.
            poem = super(PoemForm, self).save(commit=False)
            poem.name = "Brooklyn Bridge"
            if commit:
                poem.save()
            return poem
    PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm, fields="__all__")
    data = {
        'poem_set-TOTAL_FORMS': '3', # the number of forms rendered
        'poem_set-INITIAL_FORMS': '0', # the number of forms with initial data
        'poem_set-MAX_NUM_FORMS': '', # the max number of forms
        'poem_set-0-name': 'The Cloud in Trousers',
        'poem_set-1-name': 'I',
        'poem_set-2-name': '',
    }
    poet = Poet.objects.create(name='Vladimir Mayakovsky')
    formset = PoemFormSet(data=data, instance=poet)
    self.assertTrue(formset.is_valid())
    saved = formset.save()
    self.assertEqual(len(saved), 2)
    poem1, poem2 = saved
    # The custom save() overrode both submitted names.
    self.assertEqual(poem1.name, 'Brooklyn Bridge')
    self.assertEqual(poem2.name, 'Brooklyn Bridge')
    # We can provide a custom queryset to our InlineFormSet:
    custom_qs = Book.objects.order_by('-title')
    formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
    # Forms follow the queryset's descending-title order (3 existing
    # books + extra=2 blank forms).
    self.assertEqual(len(formset.forms), 5)
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Paradis Artificiels" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id" /></p>')
    self.assertHTMLEqual(formset.forms[1].as_p(),
        '<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id" /></p>')
    self.assertHTMLEqual(formset.forms[2].as_p(),
        '<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" value="3" id="id_book_set-2-id" /></p>')
    self.assertHTMLEqual(formset.forms[3].as_p(),
        '<p><label for="id_book_set-3-title">Title:</label> <input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100" /><input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author" /><input type="hidden" name="book_set-3-id" id="id_book_set-3-id" /></p>')
    self.assertHTMLEqual(formset.forms[4].as_p(),
        '<p><label for="id_book_set-4-title">Title:</label> <input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100" /><input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author" /><input type="hidden" name="book_set-4-id" id="id_book_set-4-id" /></p>')
    data = {
        'book_set-TOTAL_FORMS': '5', # the number of forms rendered
        'book_set-INITIAL_FORMS': '3', # the number of forms with initial data
        'book_set-MAX_NUM_FORMS': '', # the max number of forms
        'book_set-0-id': str(book1.id),
        'book_set-0-title': 'Les Paradis Artificiels',
        'book_set-1-id': str(book2.id),
        'book_set-1-title': 'Les Fleurs du Mal',
        'book_set-2-id': str(book3.id),
        'book_set-2-title': 'Flowers of Evil',
        'book_set-3-title': 'Revue des deux mondes',
        'book_set-4-title': '',
    }
    formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
    self.assertTrue(formset.is_valid())
    # A filtered custom queryset restricts which existing books the
    # formset edits (only titles starting with 'F' here).
    custom_qs = Book.objects.filter(title__startswith='F')
    formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="3" id="id_book_set-0-id" /></p>')
    self.assertHTMLEqual(formset.forms[1].as_p(),
        '<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>')
    self.assertHTMLEqual(formset.forms[2].as_p(),
        '<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>')
    data = {
        'book_set-TOTAL_FORMS': '3', # the number of forms rendered
        'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
        'book_set-MAX_NUM_FORMS': '', # the max number of forms
        'book_set-0-id': str(book3.id),
        'book_set-0-title': 'Flowers of Evil',
        'book_set-1-title': 'Revue des deux mondes',
        'book_set-2-title': '',
    }
    formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
    self.assertTrue(formset.is_valid())
def test_inline_formsets_with_custom_save_method_related_instance(self):
    """
    The ModelForm.save() method should be able to access the related object
    if it exists in the database (#24395).
    """
    class AnnotatingPoemForm(forms.ModelForm):
        # Append the poet's name on save, proving the related Poet row is
        # reachable from the form even though it was saved after binding.
        def save(self, commit=True):
            poem = super(AnnotatingPoemForm, self).save(commit=False)
            poem.name = "%s by %s" % (poem.name, poem.poet.name)
            if commit:
                poem.save()
            return poem
    PoemFormSet = inlineformset_factory(Poet, Poem, form=AnnotatingPoemForm, fields="__all__")
    poet = Poet()
    formset = PoemFormSet(
        data={
            'poem_set-TOTAL_FORMS': '1',
            'poem_set-INITIAL_FORMS': '0',
            'poem_set-MAX_NUM_FORMS': '',
            'poem_set-0-name': 'Le Lac',
        },
        instance=poet,
    )
    self.assertTrue(formset.is_valid())
    # The Poet instance is saved after the formset instantiation. This
    # happens in admin's changeform_view() when adding a new object and
    # some inlines in the same request.
    poet.name = 'Lamartine'
    poet.save()
    poem = formset.save()[0]
    self.assertEqual(poem.name, 'Le Lac by Lamartine')
def test_inline_formsets_with_wrong_fk_name(self):
    """ Regression for #23451 """
    # Passing a non-FK field as fk_name must raise a descriptive error.
    expected = "fk_name 'title' is not a ForeignKey to 'model_formsets.Author'."
    with self.assertRaisesMessage(ValueError, expected):
        inlineformset_factory(Author, Book, fields="__all__", fk_name='title')
def test_custom_pk(self):
    # We need to ensure that it is displayed
    CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey, fields="__all__")
    formset = CustomPrimaryKeyFormSet()
    self.assertEqual(len(formset.forms), 1)
    # A CharField custom pk is rendered as a visible text input.
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" name="form-0-my_pk" maxlength="10" /></p>\n'
        '<p><label for="id_form-0-some_field">Some field:</label> <input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100" /></p>')
    # Custom primary keys with ForeignKey, OneToOneField and AutoField ############
    place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
    FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False, fields="__all__")
    formset = FormSet(instance=place)
    self.assertEqual(len(formset.forms), 2)
    # Owner's auto_id pk stays hidden; the fk to Place is pre-filled.
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id" /></p>')
    self.assertHTMLEqual(formset.forms[1].as_p(),
        '<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
    data = {
        'owner_set-TOTAL_FORMS': '2',
        'owner_set-INITIAL_FORMS': '0',
        'owner_set-MAX_NUM_FORMS': '',
        'owner_set-0-auto_id': '',
        'owner_set-0-name': 'Joe Perry',
        'owner_set-1-auto_id': '',
        'owner_set-1-name': '',
    }
    formset = FormSet(data, instance=place)
    self.assertTrue(formset.is_valid())
    saved = formset.save()
    # Only the filled-in form saves; the blank one is skipped.
    self.assertEqual(len(saved), 1)
    owner1, = saved
    self.assertEqual(owner1.name, 'Joe Perry')
    self.assertEqual(owner1.place.name, 'Giordanos')
    formset = FormSet(instance=place)
    self.assertEqual(len(formset.forms), 3)
    # The saved owner now renders with its auto-generated pk value.
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" value="%d" id="id_owner_set-0-auto_id" /></p>'
        % owner1.auto_id)
    self.assertHTMLEqual(formset.forms[1].as_p(),
        '<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
    self.assertHTMLEqual(formset.forms[2].as_p(),
        '<p><label for="id_owner_set-2-name">Name:</label> <input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100" /><input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place" /><input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id" /></p>')
    data = {
        'owner_set-TOTAL_FORMS': '3',
        'owner_set-INITIAL_FORMS': '1',
        'owner_set-MAX_NUM_FORMS': '',
        'owner_set-0-auto_id': six.text_type(owner1.auto_id),
        'owner_set-0-name': 'Joe Perry',
        'owner_set-1-auto_id': '',
        'owner_set-1-name': 'Jack Berry',
        'owner_set-2-auto_id': '',
        'owner_set-2-name': '',
    }
    formset = FormSet(data, instance=place)
    self.assertTrue(formset.is_valid())
    saved = formset.save()
    # Only the new owner is returned by save(); the unchanged existing
    # row and the blank form are not.
    self.assertEqual(len(saved), 1)
    owner2, = saved
    self.assertEqual(owner2.name, 'Jack Berry')
    self.assertEqual(owner2.place.name, 'Giordanos')
    # Ensure a custom primary key that is a ForeignKey or OneToOneField get rendered for the user to choose.
    FormSet = modelformset_factory(OwnerProfile, fields="__all__")
    formset = FormSet()
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_form-0-owner">Owner:</label> <select name="form-0-owner" id="id_form-0-owner">\n'
        '<option value="" selected="selected">---------</option>\n'
        '<option value="%d">Joe Perry at Giordanos</option>\n'
        '<option value="%d">Jack Berry at Giordanos</option>\n'
        '</select></p>\n'
        '<p><label for="id_form-0-age">Age:</label> <input type="number" name="form-0-age" id="id_form-0-age" min="0" /></p>'
        % (owner1.auto_id, owner2.auto_id))
    owner1 = Owner.objects.get(name='Joe Perry')
    # A OneToOneField pk implies max_num=1 on the inline formset.
    FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False, fields="__all__")
    self.assertEqual(FormSet.max_num, 1)
    formset = FormSet(instance=owner1)
    self.assertEqual(len(formset.forms), 1)
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_ownerprofile-0-age">Age:</label> <input type="number" name="ownerprofile-0-age" id="id_ownerprofile-0-age" min="0" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
        % owner1.auto_id)
    data = {
        'ownerprofile-TOTAL_FORMS': '1',
        'ownerprofile-INITIAL_FORMS': '0',
        'ownerprofile-MAX_NUM_FORMS': '1',
        'ownerprofile-0-owner': '',
        'ownerprofile-0-age': '54',
    }
    formset = FormSet(data, instance=owner1)
    self.assertTrue(formset.is_valid())
    saved = formset.save()
    self.assertEqual(len(saved), 1)
    profile1, = saved
    # The blank submitted owner is filled in from the formset instance.
    self.assertEqual(profile1.owner, owner1)
    self.assertEqual(profile1.age, 54)
    formset = FormSet(instance=owner1)
    self.assertEqual(len(formset.forms), 1)
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_ownerprofile-0-age">Age:</label> <input type="number" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" min="0" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
        % owner1.auto_id)
    data = {
        'ownerprofile-TOTAL_FORMS': '1',
        'ownerprofile-INITIAL_FORMS': '1',
        'ownerprofile-MAX_NUM_FORMS': '1',
        'ownerprofile-0-owner': six.text_type(owner1.auto_id),
        'ownerprofile-0-age': '55',
    }
    formset = FormSet(data, instance=owner1)
    self.assertTrue(formset.is_valid())
    saved = formset.save()
    self.assertEqual(len(saved), 1)
    profile1, = saved
    # Updating the existing profile keeps the owner and changes the age.
    self.assertEqual(profile1.owner, owner1)
    self.assertEqual(profile1.age, 55)
def test_unique_true_enforces_max_num_one(self):
    # ForeignKey with unique=True should enforce max_num=1
    place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
    FormSet = inlineformset_factory(Place, Location, can_delete=False, fields="__all__")
    # The factory caps the formset at one form because at most one
    # Location can point at a given Place.
    self.assertEqual(FormSet.max_num, 1)
    formset = FormSet(instance=place)
    self.assertEqual(len(formset.forms), 1)
    self.assertHTMLEqual(formset.forms[0].as_p(),
        '<p><label for="id_location_set-0-lat">Lat:</label> <input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100" /></p>\n'
        '<p><label for="id_location_set-0-lon">Lon:</label> <input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100" /><input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place" /><input type="hidden" name="location_set-0-id" id="id_location_set-0-id" /></p>')
def test_foreign_keys_in_parents(self):
    # _get_foreign_key() resolves the fk to Owner for the base model and
    # for its multi-table-inheritance child alike.
    for child_model in (Restaurant, MexicanRestaurant):
        fk = _get_foreign_key(child_model, Owner)
        self.assertEqual(type(fk), models.ForeignKey)
def test_unique_validation(self):
    """A model formset enforces single-field unique constraints."""
    ProductFormSet = modelformset_factory(Product, fields="__all__", extra=1)

    def submit():
        # One form carrying the same slug each time.
        return ProductFormSet({
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-slug': 'car-red',
        })

    # First submission creates the product.
    formset = submit()
    self.assertTrue(formset.is_valid())
    created = formset.save()
    self.assertEqual(len(created), 1)
    self.assertEqual(created[0].slug, 'car-red')
    # Resubmitting the same slug violates the unique constraint.
    formset = submit()
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.errors, [{'slug': ['Product with this Slug already exists.']}])
def test_modelformset_validate_max_flag(self):
    # If validate_max is set and max_num is less than TOTAL_FORMS in the
    # data, validation fails. MAX_NUM_FORMS in the data is irrelevant
    # here (it's output as a hint for the client but its value in the
    # returned data is not checked).
    data = {
        'form-TOTAL_FORMS': '2',
        'form-INITIAL_FORMS': '0',
        'form-MAX_NUM_FORMS': '2', # should be ignored
        'form-0-price': '12.00',
        'form-0-quantity': '1',
        'form-1-price': '24.00',
        'form-1-quantity': '2',
    }
    StrictFormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1, validate_max=True)
    strict = StrictFormSet(data)
    self.assertFalse(strict.is_valid())
    self.assertEqual(strict.non_form_errors(), ['Please submit 1 or fewer forms.'])
    # Without the validate_max flag the same data passes, confirming the
    # default behavior is unchanged.
    LenientFormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1)
    lenient = LenientFormSet(data)
    self.assertTrue(lenient.is_valid())
def test_unique_together_validation(self):
    """A model formset enforces the model's unique_together constraint."""
    PriceFormSet = modelformset_factory(Price, fields="__all__", extra=1)

    def submit():
        # The same (price, quantity) pair each time.
        return PriceFormSet({
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-price': '12.00',
            'form-0-quantity': '1',
        })

    # First submission creates the row.
    formset = submit()
    self.assertTrue(formset.is_valid())
    created = formset.save()
    self.assertEqual(len(created), 1)
    price = created[0]
    self.assertEqual(price.price, Decimal('12.00'))
    self.assertEqual(price.quantity, 1)
    # An identical second submission trips the unique_together check.
    formset = submit()
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.errors, [{'__all__': ['Price with this Price and Quantity already exists.']}])
def test_unique_together_with_inlineformset_factory(self):
    # Also see bug #8882.
    repository = Repository.objects.create(name='Test Repo')

    def payload():
        # One revision form, always with the same repository/revision pair.
        return {
            'revision_set-TOTAL_FORMS': '1',
            'revision_set-INITIAL_FORMS': '0',
            'revision_set-MAX_NUM_FORMS': '',
            'revision_set-0-repository': repository.pk,
            'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
            'revision_set-0-DELETE': '',
        }

    RevisionFormSet = inlineformset_factory(Repository, Revision, extra=1, fields="__all__")
    formset = RevisionFormSet(payload(), instance=repository)
    self.assertTrue(formset.is_valid())
    created = formset.save()
    self.assertEqual(len(created), 1)
    revision = created[0]
    self.assertEqual(revision.repository, repository)
    self.assertEqual(revision.revision, '146239817507f148d448db38840db7c3cbf47c76')
    # Attempting to save the same revision against the same repo fails.
    formset = RevisionFormSet(payload(), instance=repository)
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.errors, [{'__all__': ['Revision with this Repository and Revision already exists.']}])
    # unique_together is still enforced when the repository field is not
    # among the rendered form fields. Also see #9494.
    RevisionFormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1)
    formset = RevisionFormSet(payload(), instance=repository)
    self.assertFalse(formset.is_valid())
def test_callable_defaults(self):
    # Use of callable defaults (see bug #7975).
    person = Person.objects.create(name='Ringo')
    FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1, fields="__all__")
    formset = FormSet(instance=person)
    # Django will render a hidden field for model fields that have a callable
    # default. This is required to ensure the value is tested for change correctly
    # when determine what extra forms have changed to save.
    self.assertEqual(len(formset.forms), 1) # this formset only has one form
    form = formset.forms[0]
    # Capture the default produced by the callable so the submitted data
    # below can match it exactly.
    now = form.fields['date_joined'].initial()
    result = form.as_p()
    # Normalize the rendered timestamp so the HTML comparison does not
    # depend on the moment the test runs.
    result = re.sub(r'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}(?:\.[0-9]+)?', '__DATETIME__', result)
    self.assertHTMLEqual(result,
        '<p><label for="id_membership_set-0-date_joined">Date joined:</label> <input type="text" name="membership_set-0-date_joined" value="__DATETIME__" id="id_membership_set-0-date_joined" /><input type="hidden" name="initial-membership_set-0-date_joined" value="__DATETIME__" id="initial-membership_set-0-id_membership_set-0-date_joined" /></p>\n'
        '<p><label for="id_membership_set-0-karma">Karma:</label> <input type="number" name="membership_set-0-karma" id="id_membership_set-0-karma" /><input type="hidden" name="membership_set-0-person" value="%d" id="id_membership_set-0-person" /><input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id" /></p>'
        % person.id)
    # test for validation with callable defaults. Validations rely on hidden fields
    data = {
        'membership_set-TOTAL_FORMS': '1',
        'membership_set-INITIAL_FORMS': '0',
        'membership_set-MAX_NUM_FORMS': '',
        'membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
        'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
        'membership_set-0-karma': '',
    }
    formset = FormSet(data, instance=person)
    self.assertTrue(formset.is_valid())
    # now test for when the data changes
    one_day_later = now + datetime.timedelta(days=1)
    filled_data = {
        'membership_set-TOTAL_FORMS': '1',
        'membership_set-INITIAL_FORMS': '0',
        'membership_set-MAX_NUM_FORMS': '',
        'membership_set-0-date_joined': six.text_type(one_day_later.strftime('%Y-%m-%d %H:%M:%S')),
        'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
        'membership_set-0-karma': '',
    }
    formset = FormSet(filled_data, instance=person)
    # The date differs from the hidden initial value, so the otherwise
    # empty extra form counts as changed and fails validation.
    self.assertFalse(formset.is_valid())
    # now test with split datetime fields
    class MembershipForm(forms.ModelForm):
        date_joined = forms.SplitDateTimeField(initial=now)
        class Meta:
            model = Membership
            fields = "__all__"
        def __init__(self, **kwargs):
            super(MembershipForm, self).__init__(**kwargs)
            self.fields['date_joined'].widget = forms.SplitDateTimeWidget()
    FormSet = inlineformset_factory(Person, Membership, form=MembershipForm, can_delete=False, extra=1, fields="__all__")
    data = {
        'membership_set-TOTAL_FORMS': '1',
        'membership_set-INITIAL_FORMS': '0',
        'membership_set-MAX_NUM_FORMS': '',
        'membership_set-0-date_joined_0': six.text_type(now.strftime('%Y-%m-%d')),
        'membership_set-0-date_joined_1': six.text_type(now.strftime('%H:%M:%S')),
        'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
        'membership_set-0-karma': '',
    }
    formset = FormSet(data, instance=person)
    self.assertTrue(formset.is_valid())
def test_inlineformset_factory_with_null_fk(self):
    # inlineformset_factory tests with fk having null=True. see #9462.
    # create some data that will exhibit the issue
    team = Team.objects.create(name="Red Vipers")
    Player(name="Timmy").save()
    Player(name="Bobby", team=team).save()
    PlayerInlineFormSet = inlineformset_factory(Team, Player, fields="__all__")
    # Without an instance the formset queryset is empty -- the player
    # whose fk is NULL must not be pulled in.
    self.assertQuerysetEqual(PlayerInlineFormSet().get_queryset(), [])
    roster = PlayerInlineFormSet(instance=team).get_queryset()
    self.assertEqual(len(roster), 1)
    self.assertEqual(roster[0].team, team)
    self.assertEqual(roster[0].name, 'Bobby')
def test_model_formset_with_custom_pk(self):
    # a formset for a Model that has a custom primary key that still needs to be
    # added to the formset automatically
    FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"])
    first_form = FormSet().forms[0]
    # The custom pk field is present even though it wasn't requested.
    self.assertEqual(sorted(first_form.fields.keys()), ['restaurant', 'tacos_are_yummy'])
def test_model_formset_with_initial_model_instance(self):
    # has_changed should compare model instance and primary key
    # see #18898
    PoemFormSet = modelformset_factory(Poem, fields='__all__')
    john_milton = Poet(name="John Milton")
    john_milton.save()
    submitted = {
        'form-TOTAL_FORMS': 1,
        'form-INITIAL_FORMS': 0,
        'form-MAX_NUM_FORMS': '',
        'form-0-name': '',
        'form-0-poet': str(john_milton.id),
    }
    formset = PoemFormSet(initial=[{'poet': john_milton}], data=submitted)
    # The submitted pk matches the initial model instance, so the extra
    # form must not count as changed.
    self.assertFalse(formset.extra_forms[0].has_changed())
def test_model_formset_with_initial_queryset(self):
    # has_changed should work with queryset and list of pk's
    # see #18898
    MeetingFormSet = modelformset_factory(AuthorMeeting, fields='__all__')
    Author.objects.create(pk=1, name='Charles Baudelaire')
    submitted = {
        'form-TOTAL_FORMS': 1,
        'form-INITIAL_FORMS': 0,
        'form-MAX_NUM_FORMS': '',
        'form-0-name': '',
        'form-0-created': '',
        'form-0-authors': list(Author.objects.values_list('id', flat=True)),
    }
    formset = MeetingFormSet(initial=[{'authors': Author.objects.all()}], data=submitted)
    # The submitted pk list matches the initial queryset, so the extra
    # form must not count as changed.
    self.assertFalse(formset.extra_forms[0].has_changed())
    def test_prevent_duplicates_from_with_the_same_formset(self):
        """Duplicate rows within one formset must fail validation.

        Walks through every uniqueness flavour: single unique field,
        unique_together, a formset that omits part of a unique_together
        (no check possible), inline formsets, and unique_for_date /
        unique_for_year / unique_for_month constraints.
        """
        # Two forms submitting the same value for a unique field (slug).
        FormSet = modelformset_factory(Product, fields="__all__", extra=2)
        data = {
            'form-TOTAL_FORMS': 2,
            'form-INITIAL_FORMS': 0,
            'form-MAX_NUM_FORMS': '',
            'form-0-slug': 'red_car',
            'form-1-slug': 'red_car',
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
                         ['Please correct the duplicate data for slug.'])

        # Two forms violating a unique_together (price, quantity) pair.
        FormSet = modelformset_factory(Price, fields="__all__", extra=2)
        data = {
            'form-TOTAL_FORMS': 2,
            'form-INITIAL_FORMS': 0,
            'form-MAX_NUM_FORMS': '',
            'form-0-price': '25',
            'form-0-quantity': '7',
            'form-1-price': '25',
            'form-1-quantity': '7',
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
                         ['Please correct the duplicate data for price and quantity, which must be unique.'])

        # Only the price field is specified, this should skip any unique checks since
        # the unique_together is not fulfilled. This will fail with a KeyError if broken.
        FormSet = modelformset_factory(Price, fields=("price",), extra=2)
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-price': '24',
            'form-1-price': '24',
        }
        formset = FormSet(data)
        self.assertTrue(formset.is_valid())

        # Duplicate unique values across forms of an *inline* formset: the
        # non-form error is reported once, the per-form error on the second
        # occurrence only.
        FormSet = inlineformset_factory(Author, Book, extra=0, fields="__all__")
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
        Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
        Book.objects.create(pk=3, author=author, title='Flowers of Evil')

        book_ids = author.book_set.order_by('id').values_list('id', flat=True)
        data = {
            'book_set-TOTAL_FORMS': '2',
            'book_set-INITIAL_FORMS': '2',
            'book_set-MAX_NUM_FORMS': '',
            'book_set-0-title': 'The 2008 Election',
            'book_set-0-author': str(author.id),
            'book_set-0-id': str(book_ids[0]),
            'book_set-1-title': 'The 2008 Election',
            'book_set-1-author': str(author.id),
            'book_set-1-id': str(book_ids[1]),
        }
        formset = FormSet(data=data, instance=author)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
                         ['Please correct the duplicate data for title.'])
        self.assertEqual(formset.errors,
                         [{}, {'__all__': ['Please correct the duplicate values below.']}])

        # unique_for_date: same title on the same posted date is rejected.
        FormSet = modelformset_factory(Post, fields="__all__", extra=2)
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'blah',
            'form-0-slug': 'Morning',
            'form-0-subtitle': 'foo',
            'form-0-posted': '2009-01-01',
            'form-1-title': 'blah',
            'form-1-slug': 'Morning in Prague',
            'form-1-subtitle': 'rawr',
            'form-1-posted': '2009-01-01'
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
                         ['Please correct the duplicate data for title which must be unique for the date in posted.'])
        self.assertEqual(formset.errors,
                         [{}, {'__all__': ['Please correct the duplicate values below.']}])

        # unique_for_year: same slug within the same year is rejected even
        # though the full dates differ.
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
            'form-0-slug': 'Morning in Prague',
            'form-0-subtitle': 'foo',
            'form-0-posted': '2009-01-01',
            'form-1-title': 'blah',
            'form-1-slug': 'Morning in Prague',
            'form-1-subtitle': 'rawr',
            'form-1-posted': '2009-08-02'
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
                         ['Please correct the duplicate data for slug which must be unique for the year in posted.'])

        # unique_for_month: the dates share a month number (August) although
        # the years differ, which is how unique_for_month is defined.
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
            'form-0-slug': 'Morning in Prague',
            'form-0-subtitle': 'rawr',
            'form-0-posted': '2008-08-01',
            'form-1-title': 'blah',
            'form-1-slug': 'Prague',
            'form-1-subtitle': 'rawr',
            'form-1-posted': '2009-08-02'
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
                         ['Please correct the duplicate data for subtitle which must be unique for the month in posted.'])
class TestModelFormsetOverridesTroughFormMeta(TestCase):
    """Factory kwargs (widgets, labels, help_texts, error_messages,
    field_classes) must be forwarded to the generated ModelForm's Meta for
    both modelformset_factory and inlineformset_factory.
    """

    def test_modelformset_factory_widgets(self):
        # The widget override's attrs appear on the rendered field.
        widgets = {
            'name': forms.TextInput(attrs={'class': 'poet'})
        }
        PoetFormSet = modelformset_factory(Poet, fields="__all__", widgets=widgets)
        form = PoetFormSet.form()
        self.assertHTMLEqual(
            "%s" % form['name'],
            '<input id="id_name" maxlength="100" type="text" class="poet" name="name" />'
        )

    def test_inlineformset_factory_widgets(self):
        widgets = {
            'title': forms.TextInput(attrs={'class': 'book'})
        }
        BookFormSet = inlineformset_factory(Author, Book, widgets=widgets, fields="__all__")
        form = BookFormSet.form()
        self.assertHTMLEqual(
            "%s" % form['title'],
            '<input class="book" id="id_title" maxlength="100" name="title" type="text" />'
        )

    def test_modelformset_factory_labels_overrides(self):
        # The label override replaces the model field's verbose name.
        BookFormSet = modelformset_factory(Book, fields="__all__", labels={
            'title': 'Name'
        })
        form = BookFormSet.form()
        self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')

    def test_inlineformset_factory_labels_overrides(self):
        BookFormSet = inlineformset_factory(Author, Book, fields="__all__", labels={
            'title': 'Name'
        })
        form = BookFormSet.form()
        self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')

    def test_modelformset_factory_help_text_overrides(self):
        BookFormSet = modelformset_factory(Book, fields="__all__", help_texts={
            'title': 'Choose carefully.'
        })
        form = BookFormSet.form()
        self.assertEqual(form['title'].help_text, 'Choose carefully.')

    def test_inlineformset_factory_help_text_overrides(self):
        BookFormSet = inlineformset_factory(Author, Book, fields="__all__", help_texts={
            'title': 'Choose carefully.'
        })
        form = BookFormSet.form()
        self.assertEqual(form['title'].help_text, 'Choose carefully.')

    def test_modelformset_factory_error_messages_overrides(self):
        # 'Foo ' * 30 exceeds the title max_length, triggering the custom
        # max_length error message.
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        BookFormSet = modelformset_factory(Book, fields="__all__", error_messages={
            'title': {
                'max_length': 'Title too long!!'
            }
        })
        form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
        form.full_clean()
        self.assertEqual(form.errors, {'title': ['Title too long!!']})

    def test_inlineformset_factory_error_messages_overrides(self):
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        BookFormSet = inlineformset_factory(Author, Book, fields="__all__", error_messages={
            'title': {
                'max_length': 'Title too long!!'
            }
        })
        form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
        form.full_clean()
        self.assertEqual(form.errors, {'title': ['Title too long!!']})

    def test_modelformset_factory_field_class_overrides(self):
        # The form field class is overridden while the model field itself
        # remains a CharField.
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        BookFormSet = modelformset_factory(Book, fields="__all__", field_classes={
            'title': forms.SlugField,
        })
        form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
        self.assertIs(Book._meta.get_field('title').__class__, models.CharField)
        self.assertIsInstance(form.fields['title'], forms.SlugField)

    def test_inlineformset_factory_field_class_overrides(self):
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        BookFormSet = inlineformset_factory(Author, Book, fields="__all__", field_classes={
            'title': forms.SlugField,
        })
        form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
        self.assertIs(Book._meta.get_field('title').__class__, models.CharField)
        self.assertIsInstance(form.fields['title'], forms.SlugField)
| bsd-3-clause |
mitsei/dlkit | dlkit/abstract_osid/mapping/query_inspectors.py | 1 | 12203 | """Implementations of mapping abstract base class query_inspectors."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class LocationQueryInspector:
    """This is the query inspector for searching locations."""

    # NOTE(review): assigning ``__metaclass__`` makes this class abstract only
    # under Python 2; Python 3 ignores the attribute, so abstract-method
    # enforcement would silently disappear there -- confirm the supported
    # interpreter before relying on it.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_coordinate_terms(self):
        """Gets the coordinate query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.CoordinateTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.CoordinateTerm

    # Property alias mandated by the OSID spec; fget reuses the abstract getter.
    coordinate_terms = abc.abstractproperty(fget=get_coordinate_terms)

    @abc.abstractmethod
    def get_contained_spatial_unit_terms(self):
        """Gets the contained spatial unit query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.SpatialUnitTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.SpatialUnitTerm

    contained_spatial_unit_terms = abc.abstractproperty(fget=get_contained_spatial_unit_terms)

    @abc.abstractmethod
    def get_overlapping_spatial_unit_terms(self):
        """Gets the overlapping spatial unit query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.SpatialUnitTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.SpatialUnitTerm

    overlapping_spatial_unit_terms = abc.abstractproperty(fget=get_overlapping_spatial_unit_terms)

    @abc.abstractmethod
    def get_spatial_unit_terms(self):
        """Gets the spatial unit query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.SpatialUnitTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.SpatialUnitTerm

    spatial_unit_terms = abc.abstractproperty(fget=get_spatial_unit_terms)

    @abc.abstractmethod
    def get_route_id_terms(self):
        """Gets the route ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    route_id_terms = abc.abstractproperty(fget=get_route_id_terms)

    @abc.abstractmethod
    def get_route_terms(self):
        """Gets the route query terms.

        :return: the query terms
        :rtype: ``osid.mapping.route.RouteQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.route.RouteQueryInspector

    route_terms = abc.abstractproperty(fget=get_route_terms)

    @abc.abstractmethod
    def get_path_id_terms(self):
        """Gets the path ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    path_id_terms = abc.abstractproperty(fget=get_path_id_terms)

    @abc.abstractmethod
    def get_path_terms(self):
        """Gets the path query terms.

        :return: the query terms
        :rtype: ``osid.mapping.path.PathQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.path.PathQueryInspector

    path_terms = abc.abstractproperty(fget=get_path_terms)

    @abc.abstractmethod
    def get_containing_location_id_terms(self):
        """Gets the containing location ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    containing_location_id_terms = abc.abstractproperty(fget=get_containing_location_id_terms)

    @abc.abstractmethod
    def get_containing_location_terms(self):
        """Gets the containing location query terms.

        :return: the query terms
        :rtype: ``osid.mapping.LocationQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.LocationQueryInspector

    containing_location_terms = abc.abstractproperty(fget=get_containing_location_terms)

    @abc.abstractmethod
    def get_contained_location_id_terms(self):
        """Gets the contained location ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    contained_location_id_terms = abc.abstractproperty(fget=get_contained_location_id_terms)

    @abc.abstractmethod
    def get_contained_location_terms(self):
        """Gets the contained location query terms.

        :return: the query terms
        :rtype: ``osid.mapping.LocationQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.LocationQueryInspector

    contained_location_terms = abc.abstractproperty(fget=get_contained_location_terms)

    @abc.abstractmethod
    def get_map_id_terms(self):
        """Gets the map ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    map_id_terms = abc.abstractproperty(fget=get_map_id_terms)

    @abc.abstractmethod
    def get_map_terms(self):
        """Gets the map query terms.

        :return: the query terms
        :rtype: ``osid.mapping.MapQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.MapQueryInspector

    map_terms = abc.abstractproperty(fget=get_map_terms)

    @abc.abstractmethod
    def get_location_query_inspector_record(self, location_record_type):
        """Gets the location query inspector record corresponding to the given ``Location`` record ``Type``.

        :param location_record_type: a location record type
        :type location_record_type: ``osid.type.Type``
        :return: the location query inspector record
        :rtype: ``osid.mapping.records.LocationQueryInspectorRecord``
        :raise: ``NullArgument`` -- ``location_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(location_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.records.LocationQueryInspectorRecord
class MapQueryInspector:
    """This is the query inspector for searching maps."""

    # NOTE(review): ``__metaclass__`` is honoured only by Python 2; on
    # Python 3 this class would not actually be abstract -- confirm the
    # supported interpreter.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_location_id_terms(self):
        """Gets the location ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    # Property alias mandated by the OSID spec; fget reuses the abstract getter.
    location_id_terms = abc.abstractproperty(fget=get_location_id_terms)

    @abc.abstractmethod
    def get_location_terms(self):
        """Gets the location query terms.

        :return: the query terms
        :rtype: ``osid.mapping.LocationQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.LocationQueryInspector

    location_terms = abc.abstractproperty(fget=get_location_terms)

    @abc.abstractmethod
    def get_path_id_terms(self):
        """Gets the path ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    path_id_terms = abc.abstractproperty(fget=get_path_id_terms)

    @abc.abstractmethod
    def get_path_terms(self):
        """Gets the path query terms.

        :return: the query terms
        :rtype: ``osid.mapping.path.PathQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.path.PathQueryInspector

    path_terms = abc.abstractproperty(fget=get_path_terms)

    @abc.abstractmethod
    def get_route_id_terms(self):
        """Gets the route ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    route_id_terms = abc.abstractproperty(fget=get_route_id_terms)

    @abc.abstractmethod
    def get_route_terms(self):
        """Gets the route query terms.

        :return: the query terms
        :rtype: ``osid.mapping.route.RouteQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.route.RouteQueryInspector

    route_terms = abc.abstractproperty(fget=get_route_terms)

    @abc.abstractmethod
    def get_ancestor_map_id_terms(self):
        """Gets the ancestor map ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    ancestor_map_id_terms = abc.abstractproperty(fget=get_ancestor_map_id_terms)

    @abc.abstractmethod
    def get_ancestor_map_terms(self):
        """Gets the ancestor map query terms.

        :return: the query terms
        :rtype: ``osid.mapping.MapQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.MapQueryInspector

    ancestor_map_terms = abc.abstractproperty(fget=get_ancestor_map_terms)

    @abc.abstractmethod
    def get_descendant_map_id_terms(self):
        """Gets the descendant map ``Id`` query terms.

        :return: the query terms
        :rtype: ``osid.search.terms.IdTerm``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.search.terms.IdTerm

    descendant_map_id_terms = abc.abstractproperty(fget=get_descendant_map_id_terms)

    @abc.abstractmethod
    def get_descendant_map_terms(self):
        """Gets the descendant map query terms.

        :return: the query terms
        :rtype: ``osid.mapping.MapQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.MapQueryInspector

    descendant_map_terms = abc.abstractproperty(fget=get_descendant_map_terms)

    @abc.abstractmethod
    def get_map_query_inspector_record(self, map_record_type):
        """Gets the record query inspector interface corresponding to the given ``Map`` record ``Type``.

        :param map_record_type: a map record type
        :type map_record_type: ``osid.type.Type``
        :return: the map query inspector record
        :rtype: ``osid.mapping.records.MapQueryInspectorRecord``
        :raise: ``NullArgument`` -- ``map_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(map_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.mapping.records.MapQueryInspectorRecord
| mit |
vganapath/rally | rally/plugins/openstack/context/sahara/sahara_input_data_sources.py | 6 | 5476 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from six.moves.urllib import parse
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally import consts
from rally import osclients
from rally.plugins.openstack.cleanup import manager as resource_manager
from rally.plugins.openstack.cleanup import resources as res_cleanup
from rally.plugins.openstack.scenarios.sahara import utils
from rally.plugins.openstack.scenarios.swift import utils as swift_utils
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="sahara_input_data_sources", order=443)
class SaharaInputDataSources(context.Context):
    """Context class for setting up Input Data Sources for an EDP job."""

    # JSON schema validating the context's configuration block: input_type
    # and input_url are mandatory; swift_files optionally lists objects to
    # download and upload into a Swift container.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "input_type": {
                "enum": ["swift", "hdfs"],
            },
            "input_url": {
                "type": "string",
            },
            "swift_files": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string"
                        },
                        "download_url": {
                            "type": "string"
                        }
                    },
                    "additionalProperties": False,
                    "required": ["name", "download_url"]
                }
            }
        },
        "additionalProperties": False,
        "required": ["input_type", "input_url"]
    }

    @logging.log_task_wrapper(LOG.info,
                              _("Enter context: `Sahara Input Data Sources`"))
    def setup(self):
        """Create one Sahara input data source per tenant."""
        utils.init_sahara_context(self)
        self.context["sahara"]["swift_objects"] = []
        self.context["sahara"]["container_name"] = None

        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            clients = osclients.Clients(user["credential"])
            # Swift sources need credentials and uploaded fixture objects;
            # hdfs (and any other type) only needs a data-source record.
            if self.config["input_type"] == "swift":
                self.setup_inputs_swift(clients, tenant_id,
                                        self.config["input_url"],
                                        self.config["swift_files"],
                                        user["credential"].username,
                                        user["credential"].password)
            else:
                self.setup_inputs(clients, tenant_id,
                                  self.config["input_type"],
                                  self.config["input_url"])

    def setup_inputs(self, clients, tenant_id, input_type, input_url):
        """Register a plain (non-swift) data source for the tenant."""
        input_ds = clients.sahara().data_sources.create(
            name=self.generate_random_name(),
            description="",
            data_source_type=input_type,
            url=input_url)

        self.context["tenants"][tenant_id]["sahara"]["input"] = input_ds.id

    def setup_inputs_swift(self, clients, tenant_id, input_url,
                           swift_files, username, password):
        """Create a container, upload fixture objects and register the source."""
        swift_scenario = swift_utils.SwiftScenario(clients=clients,
                                                   context=self.context)
        # NOTE(review): rstrip(".sahara") removes *characters* from the set
        # {., s, a, h, r}, not the literal ".sahara" suffix, so netlocs ending
        # in any of those letters get over-trimmed -- confirm intended.
        container_name = "rally_" + parse.urlparse(input_url).netloc.rstrip(
            ".sahara")
        self.context["sahara"]["container_name"] = (
            swift_scenario._create_container(container_name=container_name))
        for swift_file in swift_files:
            content = requests.get(swift_file["download_url"]).content
            self.context["sahara"]["swift_objects"].append(
                swift_scenario._upload_object(
                    self.context["sahara"]["container_name"], content,
                    object_name=swift_file["name"]))
        input_ds_swift = clients.sahara().data_sources.create(
            name=self.generate_random_name(), description="",
            data_source_type="swift", url=input_url,
            credential_user=username, credential_pass=password)

        self.context["tenants"][tenant_id]["sahara"]["input"] = (
            input_ds_swift.id)

    @logging.log_task_wrapper(LOG.info, _("Exit context: `Sahara Input Data"
                                          "Sources`"))
    def cleanup(self):
        """Remove data sources and Swift fixtures created by setup()."""
        resources = ["data_sources"]

        # NOTE(review): these wrappers are instantiated but no delete method
        # is visibly invoked on them here -- verify the cleanup resource
        # classes actually remove the object/container on construction.
        for swift_object in self.context["sahara"]["swift_objects"]:
            res_cleanup.SwiftObject(resource=swift_object[1])
        res_cleanup.SwiftContainer(
            resource=self.context["sahara"]["container_name"])

        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(
            names=["sahara.%s" % res for res in resources],
            users=self.context.get("users", []))
| apache-2.0 |
proversity-org/configuration | util/docker_images.py | 13 | 1144 | import yaml
import os
import pathlib2
import itertools
import sys
# Travis exports the checkout directory; default to "" so the module can be
# imported outside CI (the config path then resolves relative to the cwd).
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR", "")
# Location of the YAML file mapping container names to build weights.
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
def get_used_images(images):
    """
    Returns the images and their ranks that are scheduled to be built and that exist in the configuration file.

    Input:
    images: A set of Docker image names

    Returns:
    A list of (<image>, <weight>) tuples for every image that appears in the
    weights configuration file AND in `images`.

    Exits the process with status 1 if the configuration file is not valid YAML.
    """
    # open config file containing container weights
    config_file_path = pathlib2.Path(CONFIG_FILE_PATH)

    with (config_file_path.open(mode='r')) as file:
        try:
            # safe_load refuses arbitrary Python object tags; the weights file
            # only contains plain mappings, so this is a drop-in hardening of
            # the previous bare yaml.load call.
            config = yaml.safe_load(file)
        except yaml.YAMLError as exc:
            # The original used the Python-2-only "except E, exc" syntax and
            # referenced an undefined LOGGER (a NameError would have masked
            # the real parse error); report to stderr instead.
            sys.stderr.write("error in configuration file: %s\n" % str(exc))
            sys.exit(1)

    # get container weights
    weights = config.get("weights")

    # convert all images in config file to a list of tuples (<image>, <weight>)
    weights_list = [x.items() for x in weights]
    weights_list = list(itertools.chain.from_iterable(weights_list))

    # performs intersection between weighted images and input images
    return [x for x in weights_list if x[0] in images]
| agpl-3.0 |
alibbaba/plugin.video.live.streamspro | plugin.video.live.streamspro/resources/lib/resolvers/mybeststream.py | 2 | 1518 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.libraries import client
from resources.lib.libraries import unwise
def resolve(url):
    """Resolve a mybeststream embed URL into a playable RTMP command string.

    The incoming ``url`` carries the real page URL plus a ``referer`` query
    parameter; the page is fetched with that referer, the packed/obfuscated
    player script is unpacked with ``unwise``, and the first ``rtmp`` stream
    found is returned with the pageUrl/live/timeout options appended.

    Returns None if anything in the scrape/unpack chain fails.
    """
    try:
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        # Strip the referer parameter back out to recover the bare page URL.
        page = url.replace(referer, '').replace('&referer=', '').replace('referer=', '')

        result = client.request(url, referer=referer)
        # Grab the last packed "}( 'a','b','c','d' )" payload on the page.
        result = re.compile("}[(]('.+?' *, *'.+?' *, *'.+?' *, *'.+?')[)]").findall(result)[-1]
        result = unwise.execute(result)

        strm = re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        strm = [i for i in strm if i.startswith('rtmp')][0]

        url = '%s pageUrl=%s live=1 timeout=10' % (strm, page)
        return url
    except Exception:
        # Best-effort resolver: any parse/network failure yields None.
        # (Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt.)
        return
| gpl-2.0 |
LinuxChristian/home-assistant | homeassistant/components/device_tracker/snmp.py | 3 | 4055 | """
Support for fetching WiFi associations through SNMP.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.snmp/
"""
import binascii
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
_LOGGER = logging.getLogger(__name__)

# Installed on demand by Home Assistant's requirements machinery.
REQUIREMENTS = ['pysnmp==4.3.9']

# Configuration keys accepted by this platform.
CONF_COMMUNITY = 'community'
CONF_AUTHKEY = 'authkey'
CONF_PRIVKEY = 'privkey'
CONF_BASEOID = 'baseoid'

DEFAULT_COMMUNITY = 'public'

# vol.Inclusive ties authkey/privkey together: supply both (SNMPv3 USM
# authentication) or neither (plain community-string authentication).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
    vol.Inclusive(CONF_AUTHKEY, 'keys'): cv.string,
    vol.Inclusive(CONF_PRIVKEY, 'keys'): cv.string,
    vol.Required(CONF_BASEOID): cv.string
})
# pylint: disable=unused-argument
def get_scanner(hass, config):
    """Validate the configuration and return an snmp scanner."""
    device_scanner = SnmpScanner(config[DOMAIN])
    if device_scanner.success_init:
        return device_scanner
    return None
class SnmpScanner(DeviceScanner):
    """Queries any SNMP capable Access Point for connected devices."""

    def __init__(self, config):
        """Initialize the scanner and probe the device once."""
        # pysnmp is a platform REQUIREMENT, so import lazily here rather than
        # at module load time.
        from pysnmp.entity.rfc3413.oneliner import cmdgen
        from pysnmp.entity import config as cfg

        self.snmp = cmdgen.CommandGenerator()

        # SNMP agents conventionally listen on UDP port 161.
        self.host = cmdgen.UdpTransportTarget((config[CONF_HOST], 161))
        if CONF_AUTHKEY not in config or CONF_PRIVKEY not in config:
            # Community-string authentication (SNMP v1/v2c).
            self.auth = cmdgen.CommunityData(config[CONF_COMMUNITY])
        else:
            # User-based security (SNMP v3) with HMAC-SHA auth and AES-128
            # privacy.
            self.auth = cmdgen.UsmUserData(
                config[CONF_COMMUNITY],
                config[CONF_AUTHKEY],
                config[CONF_PRIVKEY],
                authProtocol=cfg.usmHMACSHAAuthProtocol,
                privProtocol=cfg.usmAesCfb128Protocol
            )
        self.baseoid = cmdgen.MibVariable(config[CONF_BASEOID])
        self.last_results = []

        # Test the router is accessible; success_init is checked by
        # get_scanner() to decide whether the platform loads at all.
        data = self.get_snmp_data()
        self.success_init = data is not None

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return [client['mac'] for client in self.last_results
                if client.get('mac')]

    # Suppressing no-self-use warning
    # pylint: disable=R0201
    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        # We have no names
        return None

    def _update_info(self):
        """Ensure the information from the device is up to date.

        Return boolean if scanning successful.
        """
        if not self.success_init:
            return False

        data = self.get_snmp_data()
        if not data:
            return False

        self.last_results = data
        return True

    def get_snmp_data(self):
        """Fetch MAC addresses from access point via SNMP.

        Walks the configured base OID and returns a list of
        {'mac': 'aa:bb:...'} dicts, or None on transport/agent error.
        """
        devices = []

        errindication, errstatus, errindex, restable = self.snmp.nextCmd(
            self.auth, self.host, self.baseoid)

        if errindication:
            # Transport-level failure (timeout, unreachable host, bad creds).
            _LOGGER.error("SNMPLIB error: %s", errindication)
            return
        # pylint: disable=no-member
        if errstatus:
            # Agent-level failure reported in the SNMP response itself.
            _LOGGER.error("SNMP error: %s at %s", errstatus.prettyPrint(),
                          errindex and restable[int(errindex) - 1][0] or '?')
            return

        for resrow in restable:
            for _, val in resrow:
                try:
                    mac = binascii.hexlify(val.asOctets()).decode('utf-8')
                except AttributeError:
                    # Row value was not an octet string; skip it.
                    continue
                _LOGGER.debug("Found MAC %s", mac)
                # Re-format the raw hex digits as colon-separated pairs.
                mac = ':'.join([mac[i:i+2] for i in range(0, len(mac), 2)])
                devices.append({'mac': mac})
        return devices
| apache-2.0 |
eayunstack/ceilometer | ceilometer/notification.py | 6 | 12976 | #
# Copyright 2012-2013 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_config import cfg
from oslo_context import context
from oslo_log import log
import oslo_messaging
from stevedore import extension
from ceilometer.agent import plugin_base as base
from ceilometer import coordination
from ceilometer.event import endpoint as event_endpoint
from ceilometer.i18n import _, _LI, _LW
from ceilometer import exchange_control
from ceilometer import messaging
from ceilometer import pipeline
from ceilometer import service_base
from ceilometer import utils
LOG = log.getLogger(__name__)
# oslo.config options for the [notification] group, registered below.
OPTS = [
    cfg.IntOpt('pipeline_processing_queues',
               default=10,
               min=1,
               help='Number of queues to parallelize workload across. This '
                    'value should be larger than the number of active '
                    'notification agents for optimal results.'),
    cfg.BoolOpt('ack_on_event_error',
                default=True,
                deprecated_group='collector',
                help='Acknowledge message when event persistence fails.'),
    cfg.BoolOpt('store_events',
                deprecated_group='collector',
                default=False,
                help='Save event details.'),
    cfg.BoolOpt('disable_non_metric_meters',
                default=True,
                help='WARNING: Ceilometer historically offered the ability to '
                     'store events as meters. This usage is NOT advised as it '
                     'can flood the metering database and cause performance '
                     'degradation.'),
    cfg.BoolOpt('workload_partitioning',
                default=False,
                help='Enable workload partitioning, allowing multiple '
                     'notification agents to be run simultaneously.'),
    cfg.MultiStrOpt('messaging_urls',
                    default=[],
                    secret=True,
                    help="Messaging URLs to listen for notifications. "
                         "Example: transport://user:pass@host1:port"
                         "[,hostN:portN]/virtual_host "
                         "(DEFAULT/transport_url is used if empty)"),
]

cfg.CONF.register_opts(exchange_control.EXCHANGE_OPTS)
cfg.CONF.register_opts(OPTS, group="notification")
# Pull in the publisher's telemetry_driver option so _get_notifiers can read
# cfg.CONF.publisher_notifier.telemetry_driver without importing order issues.
cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging',
                    group='publisher_notifier')
class NotificationService(service_base.BaseService):
    """Notification service.
    When running multiple agents, additional queuing sequence is required for
    inter process communication. Each agent has two listeners: one to listen
    to the main OpenStack queue and another listener(and notifier) for IPC to
    divide pipeline sink endpoints. Coordination should be enabled to have
    proper active/active HA.
    """
    # Stevedore namespace used to discover notification handler plugins;
    # also reused as the coordination group id when partitioning is enabled.
    NOTIFICATION_NAMESPACE = 'ceilometer.notification'
    # Topic prefix for the internal (IPC) pipeline queues.
    NOTIFICATION_IPC = 'ceilometer-pipe'
    def __init__(self, *args, **kwargs):
        super(NotificationService, self).__init__(*args, **kwargs)
        # Created in start() only when workload partitioning is enabled.
        self.partition_coordinator = None
        # Main-queue listeners and internal (IPC) pipeline listeners.
        self.listeners, self.pipeline_listeners = [], []
        self.group_id = None
    @classmethod
    def _get_notifications_manager(cls, pm):
        """Load all notification handler plugins, passing the pipeline
        manager ``pm`` to each plugin constructor."""
        return extension.ExtensionManager(
            namespace=cls.NOTIFICATION_NAMESPACE,
            invoke_on_load=True,
            invoke_args=(pm, )
        )
    def _get_notifiers(self, transport, pipe):
        """Build one notifier per configured processing queue for ``pipe``.

        Topics follow the pattern '<NOTIFICATION_IPC>-<pipe name>-<index>'.
        """
        notifiers = []
        for x in range(cfg.CONF.notification.pipeline_processing_queues):
            notifiers.append(oslo_messaging.Notifier(
                transport,
                driver=cfg.CONF.publisher_notifier.telemetry_driver,
                publisher_id='ceilometer.notification',
                topic='%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, x)))
        return notifiers
    def _get_pipe_manager(self, transport, pipeline_manager):
        """Return the sample pipe manager handed to the endpoints.

        With workload partitioning enabled, wrap each pipeline in a
        transport manager that republishes samples onto the IPC queues;
        otherwise return ``pipeline_manager`` unchanged.
        """
        if cfg.CONF.notification.workload_partitioning:
            pipe_manager = pipeline.SamplePipelineTransportManager()
            for pipe in pipeline_manager.pipelines:
                key = pipeline.get_pipeline_grouping_key(pipe)
                # Fall back to grouping by resource_id when the pipeline
                # declares no grouping key of its own.
                pipe_manager.add_transporter(
                    (pipe.source.support_meter, key or ['resource_id'],
                     self._get_notifiers(transport, pipe)))
        else:
            pipe_manager = pipeline_manager
        return pipe_manager
    def _get_event_pipeline_manager(self, transport):
        """Return the event pipe manager, or None (implicitly) when event
        storage is disabled via the ``store_events`` option."""
        if cfg.CONF.notification.store_events:
            if cfg.CONF.notification.workload_partitioning:
                event_pipe_manager = pipeline.EventPipelineTransportManager()
                for pipe in self.event_pipeline_manager.pipelines:
                    # Events are always grouped by their event_type.
                    event_pipe_manager.add_transporter(
                        (pipe.source.support_event, ['event_type'],
                         self._get_notifiers(transport, pipe)))
            else:
                event_pipe_manager = self.event_pipeline_manager
            return event_pipe_manager
    def start(self):
        """Start the service: build pipelines, join the coordination group
        when partitioned, and start all notification listeners."""
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()
        self.transport = messaging.get_transport()
        if cfg.CONF.notification.workload_partitioning:
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
            self.partition_coordinator.join_group(self.group_id)
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None
        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)
        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)
        if cfg.CONF.notification.workload_partitioning:
            self._configure_pipeline_listeners()
            # Rebalance the IPC listeners whenever group membership changes.
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
        self.init_pipeline_refresh()
    def _configure_main_queue_listeners(self, pipe_manager,
                                        event_pipe_manager):
        """Create and start a listener on the main OpenStack notification
        queue for every configured messaging URL."""
        notification_manager = self._get_notifications_manager(pipe_manager)
        if not list(notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)
        ack_on_error = cfg.CONF.notification.ack_on_event_error
        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints.append(
                event_endpoint.EventsNotificationEndpoint(event_pipe_manager))
        targets = []
        for ext in notification_manager:
            handler = ext.obj
            # Skip handlers producing meters from notifications when those
            # meters are disabled by configuration.
            if (cfg.CONF.notification.disable_non_metric_meters and
                    isinstance(handler, base.NonMetricNotificationBase)):
                continue
            LOG.debug('Event types from %(name)s: %(type)s'
                      ' (ack_on_error=%(error)s)',
                      {'name': ext.name,
                       'type': ', '.join(handler.event_types),
                       'error': ack_on_error})
            # NOTE(gordc): this could be a set check but oslo_messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)
        urls = cfg.CONF.notification.messaging_urls or [None]
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_notification_listener(
                transport, targets, endpoints)
            listener.start()
            self.listeners.append(listener)
    def _refresh_agent(self, event):
        """Coordination-group change callback: rebuild the IPC listeners,
        keeping those whose topics are still assigned to this agent."""
        self._configure_pipeline_listeners(True)
    def _configure_pipeline_listeners(self, reuse_listeners=False):
        """(Re)create listeners for this agent's share of the IPC queues.

        The partition coordinator assigns this agent a subset of the
        processing-queue indexes; one listener is started per
        (queue index, pipeline) topic. With ``reuse_listeners`` True,
        listeners already bound to a still-assigned topic are kept and
        only stale listeners are killed.
        """
        ev_pipes = []
        if cfg.CONF.notification.store_events:
            ev_pipes = self.event_pipeline_manager.pipelines
        pipelines = self.pipeline_manager.pipelines + ev_pipes
        transport = messaging.get_transport()
        partitioned = self.partition_coordinator.extract_my_subset(
            self.group_id,
            range(cfg.CONF.notification.pipeline_processing_queues))
        # Map topic name -> pipeline for every assigned queue index.
        queue_set = {}
        for pipe_set, pipe in itertools.product(partitioned, pipelines):
            queue_set['%s-%s-%s' %
                      (self.NOTIFICATION_IPC, pipe.name, pipe_set)] = pipe
        if reuse_listeners:
            topics = queue_set.keys()
            kill_list = []
            for listener in self.pipeline_listeners:
                if listener.dispatcher.targets[0].topic in topics:
                    # Already listening on this topic; keep the listener
                    # and drop the topic from the to-create set.
                    queue_set.pop(listener.dispatcher.targets[0].topic)
                else:
                    kill_list.append(listener)
            for listener in kill_list:
                utils.kill_listeners([listener])
                self.pipeline_listeners.remove(listener)
        else:
            utils.kill_listeners(self.pipeline_listeners)
            self.pipeline_listeners = []
        for topic, pipe in queue_set.items():
            # NOTE(review): ``pipe_set`` here is the leftover value from the
            # product() loop above, not this topic's queue index, so this log
            # line may be misleading -- confirm before relying on it.
            LOG.debug('Pipeline endpoint: %s from set: %s', pipe.name,
                      pipe_set)
            pipe_endpoint = (pipeline.EventPipelineEndpoint
                             if isinstance(pipe, pipeline.EventPipeline)
                             else pipeline.SamplePipelineEndpoint)
            listener = messaging.get_notification_listener(
                transport,
                [oslo_messaging.Target(topic=topic)],
                [pipe_endpoint(self.ctxt, pipe)])
            listener.start()
            self.pipeline_listeners.append(listener)
    def stop(self):
        """Stop the coordinator (if running) and every listener."""
        if self.partition_coordinator:
            self.partition_coordinator.stop()
        utils.kill_listeners(self.listeners + self.pipeline_listeners)
        super(NotificationService, self).stop()
    def reload_pipeline(self):
        """Rebuild pipe managers and restart listeners after a validated
        pipeline configuration change."""
        LOG.info(_LI("Reloading notification agent and listeners."))
        if self.pipeline_validated:
            self.pipe_manager = self._get_pipe_manager(
                self.transport, self.pipeline_manager)
        if self.event_pipeline_validated:
            self.event_pipe_manager = self._get_event_pipeline_manager(
                self.transport)
        # re-start the main queue listeners.
        # NOTE(review): self.listeners still holds the killed listeners and
        # new ones are appended -- confirm whether it should be cleared here.
        utils.kill_listeners(self.listeners)
        self._configure_main_queue_listeners(
            self.pipe_manager, self.event_pipe_manager)
        # re-start the pipeline listeners if workload partitioning
        # is enabled.
        if cfg.CONF.notification.workload_partitioning:
            self._configure_pipeline_listeners()
| apache-2.0 |
ubiar/odoo | addons/website_report/controllers/main.py | 243 | 1460 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.website.controllers.main import Website
from openerp.http import request, route
class Website(Website):
    @route()
    def customize_template_get(self, xml_id, full=False):
        """Extend the customizable-template list with any report views
        recorded in the current session under ``report_view_ids``."""
        res = super(Website, self).customize_template_get(xml_id, full=full)
        if full:
            session_views = request.session.get('report_view_ids', [])
            for view in session_views:
                res += super(Website, self).customize_template_get(
                    view.get('xml_id'), full=full)
        return res
| agpl-3.0 |
ariestiyansyah/python-social-auth | social/backends/battlenet.py | 74 | 1828 | from social.backends.oauth import BaseOAuth2
# This provides a backend for python-social-auth. This should not be confused
# with officially battle.net offerings. This piece of code is not officially
# affiliated with Blizzard Entertainment, copyrights to their respective
# owners. See http://us.battle.net/en/forum/topic/13979588015 for more details.
class BattleNetOAuth2(BaseOAuth2):
    """OAuth2 backend for battle.net accounts (EU endpoints)."""
    name = 'battlenet-oauth2'
    ID_KEY = 'accountId'
    REDIRECT_STATE = False
    AUTHORIZATION_URL = 'https://eu.battle.net/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://eu.battle.net/oauth/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REVOKE_TOKEN_METHOD = 'GET'
    DEFAULT_SCOPE = ['wow.profile']
    EXTRA_DATA = [
        ('refresh_token', 'refresh_token', True),
        ('expires_in', 'expires'),
        ('token_type', 'token_type', True)
    ]

    def get_characters(self, access_token):
        """Fetch the World of Warcraft character list from the battle.net
        API; return an empty list when none are present in the reply."""
        params = {'access_token': access_token}
        locale = self.setting('API_LOCALE')
        if locale:
            params['locale'] = locale
        data = self.get_json(
            'https://eu.api.battle.net/wow/user/characters',
            params=params
        )
        return data.get('characters') or []

    def get_user_details(self, response):
        """Extract the stored user details (battletag) from the account
        payload."""
        return {'battletag': response.get('battletag')}

    def user_data(self, access_token, *args, **kwargs):
        """Load the account's battletag from the battle.net API."""
        url = 'https://eu.api.battle.net/account/user/battletag'
        return self.get_json(url, params={'access_token': access_token})
| bsd-3-clause |
sloria/osf.io | api_tests/users/serializers/test_serializers.py | 4 | 1775 | import pytest
import mock
from api.users.serializers import UserSerializer
from osf_tests.factories import (
UserFactory,
)
from tests.utils import make_drf_request_with_version
@pytest.fixture()
@mock.patch('website.search.elastic_search.update_user')
def user(mock_update_user):
    """Return a saved user with one employment and one education entry;
    search indexing is mocked out to avoid hitting elasticsearch."""
    fixture_user = UserFactory()
    job = {
        'title': 'Veterinarian/Owner',
        'ongoing': True,
        'startYear': '2009',
        'startMonth': 4,
        'institution': 'Happy Paws Vet',
    }
    school = {
        'endYear': '1994',
        'ongoing': False,
        'endMonth': 6,
        'startYear': '1990',
        'department': 'Veterinary Medicine',
        'startMonth': 8,
        'institution': 'UC Davis',
    }
    fixture_user.jobs = [job]
    fixture_user.schools = [school]
    fixture_user.save()
    return fixture_user
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestUserSerializer:

    def test_user_serializer(self, user):
        """The serialized payload exposes the expected id/type, attribute
        values and relationship keys for a user."""
        req = make_drf_request_with_version(version='2.0')
        serialized = UserSerializer(user, context={'request': req}).data
        data = serialized['data']
        assert data['id'] == user._id
        assert data['type'] == 'users'
        # Attribute values mirror the model fields.
        attributes = data['attributes']
        assert attributes['family_name'] == user.family_name
        assert attributes['given_name'] == user.given_name
        assert attributes['active'] == user.is_active
        assert attributes['employment'] == user.jobs
        assert attributes['education'] == user.schools
        # Relationships: presence only, not the relationship payloads.
        relationships = data['relationships']
        for rel in ('quickfiles', 'nodes', 'institutions', 'preprints',
                    'registrations'):
            assert rel in relationships
| apache-2.0 |
bopo/django-ratings | djangoratings/views.py | 6 | 5043 | from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404
from exceptions import *
from django.conf import settings
from default_settings import RATINGS_VOTES_PER_IP
class AddRatingView(object):
    """Class-based view that records a rating vote on a model field.

    Subclasses can override the ``*_response`` hooks to customize the HTTP
    response returned for each outcome.
    """

    # Lifetime of the anonymous-vote tracking cookie, in seconds (365 days).
    # TODO: move cookie max_age to settings
    COOKIE_MAX_AGE = 31536000

    def __call__(self, request, content_type_id, object_id, field_name, score):
        """__call__(request, content_type_id, object_id, field_name, score)
        Adds a vote to the specified model field."""
        try:
            instance = self.get_instance(content_type_id, object_id)
        except ObjectDoesNotExist:
            raise Http404('Object does not exist')

        context = self.get_context(request)
        context['instance'] = instance

        try:
            field = getattr(instance, field_name)
        except AttributeError:
            return self.invalid_field_response(request, context)

        context.update({
            'field': field,
            'score': score,
        })

        had_voted = bool(field.get_rating_for_user(
            request.user, request.META['REMOTE_ADDR'], request.COOKIES))
        context['had_voted'] = had_voted
        try:
            adds = field.add(score, request.user,
                             request.META.get('REMOTE_ADDR'), request.COOKIES)
        except IPLimitReached:
            return self.too_many_votes_from_ip_response(request, context)
        except AuthRequired:
            return self.authentication_required_response(request, context)
        except InvalidRating:
            return self.invalid_rating_response(request, context)
        except CannotChangeVote:
            return self.cannot_change_vote_response(request, context)
        except CannotDeleteVote:
            return self.cannot_delete_vote_response(request, context)
        if had_voted:
            return self.rating_changed_response(request, context, adds)
        return self.rating_added_response(request, context, adds)

    def get_context(self, request, context=None):
        """Return the context dict passed to the response hooks.

        BUG FIX: the default used to be a shared mutable ``{}``; __call__
        mutates the returned dict, so state leaked across requests. Each
        call now gets a fresh dict unless one is explicitly supplied.
        """
        return {} if context is None else context

    def render_to_response(self, template, context, request):
        raise NotImplementedError

    def _apply_cookie(self, response, adds):
        """Set or delete the anonymous-vote cookie described by ``adds``
        (keys: 'cookie', 'cookie_name', optional 'deleted')."""
        if adds and 'cookie' in adds:
            cookie_name, cookie = adds['cookie_name'], adds['cookie']
            if 'deleted' in adds:
                response.delete_cookie(cookie_name)
            else:
                response.set_cookie(cookie_name, cookie,
                                    self.COOKIE_MAX_AGE, path='/')
        return response

    def _forbidden_response(self, message):
        """Return a 403 response carrying ``message`` as its body."""
        response = HttpResponse(message)
        response.status_code = 403
        return response

    def too_many_votes_from_ip_response(self, request, context):
        response = HttpResponse('Too many votes from this IP address for this object.')
        return response

    def rating_changed_response(self, request, context, adds=None):
        return self._apply_cookie(HttpResponse('Vote changed.'), adds)

    def rating_added_response(self, request, context, adds=None):
        return self._apply_cookie(HttpResponse('Vote recorded.'), adds)

    def authentication_required_response(self, request, context):
        return self._forbidden_response('You must be logged in to vote.')

    def cannot_change_vote_response(self, request, context):
        return self._forbidden_response('You have already voted.')

    def cannot_delete_vote_response(self, request, context):
        return self._forbidden_response('You can\'t delete this vote.')

    def invalid_field_response(self, request, context):
        return self._forbidden_response('Invalid field name.')

    def invalid_rating_response(self, request, context):
        return self._forbidden_response('Invalid rating value.')

    def get_instance(self, content_type_id, object_id):
        """Resolve (content_type_id, object_id) to the rated model instance."""
        return ContentType.objects.get(pk=content_type_id)\
            .get_object_for_this_type(pk=object_id)
class AddRatingFromModel(AddRatingView):
    def __call__(self, request, model, app_label, object_id, field_name, score):
        """__call__(request, model, app_label, object_id, field_name, score)
        Adds a vote to the specified model field."""
        try:
            ctype = ContentType.objects.get(model=model, app_label=app_label)
        except ContentType.DoesNotExist:
            raise Http404('Invalid `model` or `app_label`.')
        # Delegate to the content-type-id based implementation.
        parent = super(AddRatingFromModel, self)
        return parent.__call__(request, ctype.id, object_id, field_name,
                               score)
| bsd-2-clause |
navotsil/Open-Knesset | polyorg/tests.py | 14 | 1189 | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from persons.models import Person
from models import Candidate, CandidateList
class CreationTest(TestCase):
    """Creation and basic behavior of Candidate/CandidateList models."""

    def setUp(self):
        self.persons = [
            Person.objects.create(name='Linus'),
            Person.objects.create(name='Guido'),
            Person.objects.create(name='Jacob'),
        ]

    def test_candidatelist(self):
        """
        Tests the creation of CandidateList and its basic methods
        """
        cl1 = CandidateList.objects.create(name="Imagine", ballot="I")
        # Candidates are assigned ordinals starting at 1.
        for ordinal, person in enumerate(self.persons, start=1):
            Candidate.objects.create(candidates_list=cl1, person=person,
                                     ordinal=ordinal)
        cl1.save()
        self.assertFalse(cl1.member_ids)
        cl2 = CandidateList(name="Think", ballot="T", surplus_partner=cl1)
        cl2.save()
        self.assertEqual(cl1.surplus_partner, cl2)
        cl1.delete()
        cl2.delete()

    def tearDown(self):
        # BUG FIX: this was named ``teardown`` (lowercase), which unittest
        # never calls, so the created persons leaked between tests.
        for person in self.persons:
            person.delete()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.